diff --git a/.dockerignore b/.dockerignore
index 01f0a578..d1be94f1 100755
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,13 +2,13 @@
 **/*.pyc
 *.sqlite
 
-node_modules
-dist
+**/node_modules
+**/dist
 
-*/builds/
-*/logs/
-*/users/
-*/repos/
+**/builds/
+**/logs/
+**/users/
+**/repos/
 
 session.cache
 *.env
diff --git a/.go-version b/.go-version
new file mode 100644
index 00000000..d2ab029d
--- /dev/null
+++ b/.go-version
@@ -0,0 +1 @@
+1.21
diff --git a/Dockerfile b/Dockerfile
index eccff5b6..a3b21a79 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.18.3-bullseye
+FROM golang:1.21-bullseye
 
 RUN set -eux; \
 	apt-get update; \
@@ -9,13 +9,13 @@ RUN set -eux; \
 	rm -rf /var/lib/apt/lists/*
 
 WORKDIR /
-ADD https://ziglang.org/download/0.9.1/zig-linux-x86_64-0.9.1.tar.xz /zig-linux-x86_64-0.9.1.tar.xz
-RUN tar -xvf /zig-linux-x86_64-0.9.1.tar.xz
-ENV PATH="/zig-linux-x86_64-0.9.1:${PATH}"
+ADD https://ziglang.org/download/0.11.0/zig-linux-x86_64-0.11.0.tar.xz /zig-linux-x86_64-0.11.0.tar.xz
+RUN tar -xvf /zig-linux-x86_64-0.11.0.tar.xz
+ENV PATH="/zig-linux-x86_64-0.11.0:${PATH}"
 
-ADD https://github.com/mattnite/gyro/releases/download/0.5.0/gyro-0.5.0-linux-x86_64.tar.gz /gyro-0.5.0-linux-x86_64.tar.gz
-RUN tar -xvf /gyro-0.5.0-linux-x86_64.tar.gz
-ENV PATH="/gyro-0.5.0-linux-x86_64/bin:${PATH}"
+# ADD https://github.com/mattnite/gyro/releases/download/0.5.0/gyro-0.5.0-linux-x86_64.tar.gz /gyro-0.5.0-linux-x86_64.tar.gz
+# RUN tar -xvf /gyro-0.5.0-linux-x86_64.tar.gz
+# ENV PATH="/gyro-0.5.0-linux-x86_64/bin:${PATH}"
 
 RUN mkdir /app
 ADD . /app/
diff --git a/builder/vspherensxt/vspherensxt.go b/builder/vspherensxt/vspherensxt.go
index 57f73617..52f576cc 100755
--- a/builder/vspherensxt/vspherensxt.go
+++ b/builder/vspherensxt/vspherensxt.go
@@ -97,15 +97,15 @@ func (builder VSphereNSXTBuilder) generateBuildID(build *ent.Build) string {
 }
 
 func (builder VSphereNSXTBuilder) generateVmName(competition *ent.Competition, team *ent.Team, host *ent.Host, build *ent.Build) string {
-	return (competition.HclID + "-Team-" + fmt.Sprintf("%02d", team.TeamNumber) + "-" + host.Hostname + "-" + builder.generateBuildID(build))
+	return (competition.HCLID + "-Team-" + fmt.Sprintf("%02d", team.TeamNumber) + "-" + host.Hostname + "-" + builder.generateBuildID(build))
 }
 
 func (builder VSphereNSXTBuilder) generateRouterName(competition *ent.Competition, team *ent.Team, build *ent.Build) string {
-	return (competition.HclID + "-Team-" + fmt.Sprintf("%02d", team.TeamNumber) + "-" + builder.generateBuildID(build))
+	return (competition.HCLID + "-Team-" + fmt.Sprintf("%02d", team.TeamNumber) + "-" + builder.generateBuildID(build))
 }
 
 func (builder VSphereNSXTBuilder) generateNetworkName(competition *ent.Competition, team *ent.Team, network *ent.Network, build *ent.Build) string {
-	return (competition.HclID + "-Team-" + fmt.Sprintf("%02d", team.TeamNumber) + "-" + network.Name + "-" + builder.generateBuildID(build))
+	return (competition.HCLID + "-Team-" + fmt.Sprintf("%02d", team.TeamNumber) + "-" + network.Name + "-" + builder.generateBuildID(build))
 }
 
 // DeployHost deploys a given host from the environment to VSphere
diff --git a/ent/adhocplan.go b/ent/adhocplan.go
index e7b21a54..1e427ce8 100755
--- a/ent/adhocplan.go
+++ b/ent/adhocplan.go
@@ -1,4 +1,4 @@
-// Code generated by entc, DO NOT EDIT.
+// Code generated by ent, DO NOT EDIT.
package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/adhocplan" "github.com/gen0cide/laforge/ent/agenttask" @@ -23,6 +24,7 @@ type AdhocPlan struct { // The values are being populated by the AdhocPlanQuery when eager-loading is set. Edges AdhocPlanEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // PrevAdhocPlan holds the value of the PrevAdhocPlan edge. HCLPrevAdhocPlan []*AdhocPlan `json:"PrevAdhocPlan,omitempty"` @@ -34,9 +36,10 @@ type AdhocPlan struct { HCLAdhocPlanToStatus *Status `json:"AdhocPlanToStatus,omitempty"` // AdhocPlanToAgentTask holds the value of the AdhocPlanToAgentTask edge. HCLAdhocPlanToAgentTask *AgentTask `json:"AdhocPlanToAgentTask,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ adhoc_plan_adhoc_plan_to_build *uuid.UUID adhoc_plan_adhoc_plan_to_agent_task *uuid.UUID + selectValues sql.SelectValues } // AdhocPlanEdges holds the relations/edges for other nodes in the graph. @@ -54,6 +57,11 @@ type AdhocPlanEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [5]bool + // totalCount holds the count of the edges above. + totalCount [5]map[string]int + + namedPrevAdhocPlan map[string][]*AdhocPlan + namedNextAdhocPlan map[string][]*AdhocPlan } // PrevAdhocPlanOrErr returns the PrevAdhocPlan value or an error if the edge @@ -79,8 +87,7 @@ func (e AdhocPlanEdges) NextAdhocPlanOrErr() ([]*AdhocPlan, error) { func (e AdhocPlanEdges) AdhocPlanToBuildOrErr() (*Build, error) { if e.loadedTypes[2] { if e.AdhocPlanToBuild == nil { - // The edge AdhocPlanToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.AdhocPlanToBuild, nil @@ -93,8 +100,7 @@ func (e AdhocPlanEdges) AdhocPlanToBuildOrErr() (*Build, error) { func (e AdhocPlanEdges) AdhocPlanToStatusOrErr() (*Status, error) { if e.loadedTypes[3] { if e.AdhocPlanToStatus == nil { - // The edge AdhocPlanToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.AdhocPlanToStatus, nil @@ -107,8 +113,7 @@ func (e AdhocPlanEdges) AdhocPlanToStatusOrErr() (*Status, error) { func (e AdhocPlanEdges) AdhocPlanToAgentTaskOrErr() (*AgentTask, error) { if e.loadedTypes[4] { if e.AdhocPlanToAgentTask == nil { - // The edge AdhocPlanToAgentTask was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: agenttask.Label} } return e.AdhocPlanToAgentTask, nil @@ -117,8 +122,8 @@ func (e AdhocPlanEdges) AdhocPlanToAgentTaskOrErr() (*AgentTask, error) { } // scanValues returns the types for scanning values from sql.Rows. 
-func (*AdhocPlan) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*AdhocPlan) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case adhocplan.FieldID: @@ -128,7 +133,7 @@ func (*AdhocPlan) scanValues(columns []string) ([]interface{}, error) { case adhocplan.ForeignKeys[1]: // adhoc_plan_adhoc_plan_to_agent_task values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type AdhocPlan", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -136,7 +141,7 @@ func (*AdhocPlan) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the AdhocPlan fields. -func (ap *AdhocPlan) assignValues(columns []string, values []interface{}) error { +func (ap *AdhocPlan) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -162,51 +167,59 @@ func (ap *AdhocPlan) assignValues(columns []string, values []interface{}) error ap.adhoc_plan_adhoc_plan_to_agent_task = new(uuid.UUID) *ap.adhoc_plan_adhoc_plan_to_agent_task = *value.S.(*uuid.UUID) } + default: + ap.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the AdhocPlan. +// This includes values selected through modifiers, order, etc. +func (ap *AdhocPlan) Value(name string) (ent.Value, error) { + return ap.selectValues.Get(name) +} + // QueryPrevAdhocPlan queries the "PrevAdhocPlan" edge of the AdhocPlan entity. func (ap *AdhocPlan) QueryPrevAdhocPlan() *AdhocPlanQuery { - return (&AdhocPlanClient{config: ap.config}).QueryPrevAdhocPlan(ap) + return NewAdhocPlanClient(ap.config).QueryPrevAdhocPlan(ap) } // QueryNextAdhocPlan queries the "NextAdhocPlan" edge of the AdhocPlan entity. func (ap *AdhocPlan) QueryNextAdhocPlan() *AdhocPlanQuery { - return (&AdhocPlanClient{config: ap.config}).QueryNextAdhocPlan(ap) + return NewAdhocPlanClient(ap.config).QueryNextAdhocPlan(ap) } // QueryAdhocPlanToBuild queries the "AdhocPlanToBuild" edge of the AdhocPlan entity. func (ap *AdhocPlan) QueryAdhocPlanToBuild() *BuildQuery { - return (&AdhocPlanClient{config: ap.config}).QueryAdhocPlanToBuild(ap) + return NewAdhocPlanClient(ap.config).QueryAdhocPlanToBuild(ap) } // QueryAdhocPlanToStatus queries the "AdhocPlanToStatus" edge of the AdhocPlan entity. func (ap *AdhocPlan) QueryAdhocPlanToStatus() *StatusQuery { - return (&AdhocPlanClient{config: ap.config}).QueryAdhocPlanToStatus(ap) + return NewAdhocPlanClient(ap.config).QueryAdhocPlanToStatus(ap) } // QueryAdhocPlanToAgentTask queries the "AdhocPlanToAgentTask" edge of the AdhocPlan entity. func (ap *AdhocPlan) QueryAdhocPlanToAgentTask() *AgentTaskQuery { - return (&AdhocPlanClient{config: ap.config}).QueryAdhocPlanToAgentTask(ap) + return NewAdhocPlanClient(ap.config).QueryAdhocPlanToAgentTask(ap) } // Update returns a builder for updating this AdhocPlan. // Note that you need to call AdhocPlan.Unwrap() before calling this method if this AdhocPlan // was returned from a transaction, and the transaction was committed or rolled back. 
func (ap *AdhocPlan) Update() *AdhocPlanUpdateOne { - return (&AdhocPlanClient{config: ap.config}).UpdateOne(ap) + return NewAdhocPlanClient(ap.config).UpdateOne(ap) } // Unwrap unwraps the AdhocPlan entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (ap *AdhocPlan) Unwrap() *AdhocPlan { - tx, ok := ap.config.driver.(*txDriver) + _tx, ok := ap.config.driver.(*txDriver) if !ok { panic("ent: AdhocPlan is not a transactional entity") } - ap.config.driver = tx.drv + ap.config.driver = _tx.drv return ap } @@ -219,11 +232,53 @@ func (ap *AdhocPlan) String() string { return builder.String() } -// AdhocPlans is a parsable slice of AdhocPlan. -type AdhocPlans []*AdhocPlan +// NamedPrevAdhocPlan returns the PrevAdhocPlan named value or an error if the edge was not +// loaded in eager-loading with this name. +func (ap *AdhocPlan) NamedPrevAdhocPlan(name string) ([]*AdhocPlan, error) { + if ap.Edges.namedPrevAdhocPlan == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := ap.Edges.namedPrevAdhocPlan[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (ap AdhocPlans) config(cfg config) { - for _i := range ap { - ap[_i].config = cfg +func (ap *AdhocPlan) appendNamedPrevAdhocPlan(name string, edges ...*AdhocPlan) { + if ap.Edges.namedPrevAdhocPlan == nil { + ap.Edges.namedPrevAdhocPlan = make(map[string][]*AdhocPlan) + } + if len(edges) == 0 { + ap.Edges.namedPrevAdhocPlan[name] = []*AdhocPlan{} + } else { + ap.Edges.namedPrevAdhocPlan[name] = append(ap.Edges.namedPrevAdhocPlan[name], edges...) } } + +// NamedNextAdhocPlan returns the NextAdhocPlan named value or an error if the edge was not +// loaded in eager-loading with this name. +func (ap *AdhocPlan) NamedNextAdhocPlan(name string) ([]*AdhocPlan, error) { + if ap.Edges.namedNextAdhocPlan == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := ap.Edges.namedNextAdhocPlan[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (ap *AdhocPlan) appendNamedNextAdhocPlan(name string, edges ...*AdhocPlan) { + if ap.Edges.namedNextAdhocPlan == nil { + ap.Edges.namedNextAdhocPlan = make(map[string][]*AdhocPlan) + } + if len(edges) == 0 { + ap.Edges.namedNextAdhocPlan[name] = []*AdhocPlan{} + } else { + ap.Edges.namedNextAdhocPlan[name] = append(ap.Edges.namedNextAdhocPlan[name], edges...) + } +} + +// AdhocPlans is a parsable slice of AdhocPlan. +type AdhocPlans []*AdhocPlan diff --git a/ent/adhocplan/adhocplan.go b/ent/adhocplan/adhocplan.go index dcdf077e..2a414556 100755 --- a/ent/adhocplan/adhocplan.go +++ b/ent/adhocplan/adhocplan.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package adhocplan import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -90,3 +92,95 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the AdhocPlan queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByPrevAdhocPlanCount orders the results by PrevAdhocPlan count. 
+func ByPrevAdhocPlanCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPrevAdhocPlanStep(), opts...) + } +} + +// ByPrevAdhocPlan orders the results by PrevAdhocPlan terms. +func ByPrevAdhocPlan(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPrevAdhocPlanStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByNextAdhocPlanCount orders the results by NextAdhocPlan count. +func ByNextAdhocPlanCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newNextAdhocPlanStep(), opts...) + } +} + +// ByNextAdhocPlan orders the results by NextAdhocPlan terms. +func ByNextAdhocPlan(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNextAdhocPlanStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAdhocPlanToBuildField orders the results by AdhocPlanToBuild field. +func ByAdhocPlanToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAdhocPlanToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAdhocPlanToStatusField orders the results by AdhocPlanToStatus field. +func ByAdhocPlanToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAdhocPlanToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAdhocPlanToAgentTaskField orders the results by AdhocPlanToAgentTask field. +func ByAdhocPlanToAgentTaskField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAdhocPlanToAgentTaskStep(), sql.OrderByField(field, opts...)) + } +} +func newPrevAdhocPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, PrevAdhocPlanTable, PrevAdhocPlanPrimaryKey...), + ) +} +func newNextAdhocPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, NextAdhocPlanTable, NextAdhocPlanPrimaryKey...), + ) +} +func newAdhocPlanToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AdhocPlanToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AdhocPlanToBuildTable, AdhocPlanToBuildColumn), + ) +} +func newAdhocPlanToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AdhocPlanToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, AdhocPlanToStatusTable, AdhocPlanToStatusColumn), + ) +} +func newAdhocPlanToAgentTaskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AdhocPlanToAgentTaskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AdhocPlanToAgentTaskTable, AdhocPlanToAgentTaskColumn), + ) +} diff --git a/ent/adhocplan/where.go b/ent/adhocplan/where.go index cfa221f6..e689470c 100755 --- a/ent/adhocplan/where.go +++ b/ent/adhocplan/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package adhocplan @@ -11,85 +11,47 @@ import ( // ID filters vertices based on their ID field. 
func ID(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.AdhocPlan(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.AdhocPlan(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.AdhocPlan(sql.FieldLTE(FieldID, id)) } // HasPrevAdhocPlan applies the HasEdge predicate on the "PrevAdhocPlan" edge. @@ -97,7 +59,6 @@ func HasPrevAdhocPlan() predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PrevAdhocPlanTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, PrevAdhocPlanTable, PrevAdhocPlanPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -107,11 +68,7 @@ func HasPrevAdhocPlan() predicate.AdhocPlan { // HasPrevAdhocPlanWith applies the HasEdge predicate on the "PrevAdhocPlan" edge with a given conditions (other predicates). 
func HasPrevAdhocPlanWith(preds ...predicate.AdhocPlan) predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, PrevAdhocPlanTable, PrevAdhocPlanPrimaryKey...), - ) + step := newPrevAdhocPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -125,7 +82,6 @@ func HasNextAdhocPlan() predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(NextAdhocPlanTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, NextAdhocPlanTable, NextAdhocPlanPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -135,11 +91,7 @@ func HasNextAdhocPlan() predicate.AdhocPlan { // HasNextAdhocPlanWith applies the HasEdge predicate on the "NextAdhocPlan" edge with a given conditions (other predicates). func HasNextAdhocPlanWith(preds ...predicate.AdhocPlan) predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, NextAdhocPlanTable, NextAdhocPlanPrimaryKey...), - ) + step := newNextAdhocPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -153,7 +105,6 @@ func HasAdhocPlanToBuild() predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AdhocPlanToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AdhocPlanToBuildTable, AdhocPlanToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -163,11 +114,7 @@ func HasAdhocPlanToBuild() predicate.AdhocPlan { // HasAdhocPlanToBuildWith applies the HasEdge predicate on the "AdhocPlanToBuild" edge with a given conditions (other predicates). func HasAdhocPlanToBuildWith(preds ...predicate.Build) predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AdhocPlanToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AdhocPlanToBuildTable, AdhocPlanToBuildColumn), - ) + step := newAdhocPlanToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -181,7 +128,6 @@ func HasAdhocPlanToStatus() predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AdhocPlanToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, AdhocPlanToStatusTable, AdhocPlanToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -191,11 +137,7 @@ func HasAdhocPlanToStatus() predicate.AdhocPlan { // HasAdhocPlanToStatusWith applies the HasEdge predicate on the "AdhocPlanToStatus" edge with a given conditions (other predicates). 
func HasAdhocPlanToStatusWith(preds ...predicate.Status) predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AdhocPlanToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, AdhocPlanToStatusTable, AdhocPlanToStatusColumn), - ) + step := newAdhocPlanToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -209,7 +151,6 @@ func HasAdhocPlanToAgentTask() predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AdhocPlanToAgentTaskTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AdhocPlanToAgentTaskTable, AdhocPlanToAgentTaskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -219,11 +160,7 @@ func HasAdhocPlanToAgentTask() predicate.AdhocPlan { // HasAdhocPlanToAgentTaskWith applies the HasEdge predicate on the "AdhocPlanToAgentTask" edge with a given conditions (other predicates). func HasAdhocPlanToAgentTaskWith(preds ...predicate.AgentTask) predicate.AdhocPlan { return predicate.AdhocPlan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AdhocPlanToAgentTaskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AdhocPlanToAgentTaskTable, AdhocPlanToAgentTaskColumn), - ) + step := newAdhocPlanToAgentTaskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -234,32 +171,15 @@ func HasAdhocPlanToAgentTaskWith(preds ...predicate.AgentTask) predicate.AdhocPl // And groups predicates with the AND operator between them. func And(predicates ...predicate.AdhocPlan) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AdhocPlan(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.AdhocPlan) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AdhocPlan(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.AdhocPlan) predicate.AdhocPlan { - return predicate.AdhocPlan(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.AdhocPlan(sql.NotPredicates(p)) } diff --git a/ent/adhocplan_create.go b/ent/adhocplan_create.go index 7c5460aa..82326ef0 100755 --- a/ent/adhocplan_create.go +++ b/ent/adhocplan_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -107,44 +107,8 @@ func (apc *AdhocPlanCreate) Mutation() *AdhocPlanMutation { // Save creates the AdhocPlan in the database. 
func (apc *AdhocPlanCreate) Save(ctx context.Context) (*AdhocPlan, error) { - var ( - err error - node *AdhocPlan - ) apc.defaults() - if len(apc.hooks) == 0 { - if err = apc.check(); err != nil { - return nil, err - } - node, err = apc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AdhocPlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = apc.check(); err != nil { - return nil, err - } - apc.mutation = mutation - if node, err = apc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(apc.hooks) - 1; i >= 0; i-- { - if apc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = apc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, apc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, apc.sqlSave, apc.mutation, apc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -192,10 +156,13 @@ func (apc *AdhocPlanCreate) check() error { } func (apc *AdhocPlanCreate) sqlSave(ctx context.Context) (*AdhocPlan, error) { + if err := apc.check(); err != nil { + return nil, err + } _node, _spec := apc.createSpec() if err := sqlgraph.CreateNode(ctx, apc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -206,19 +173,15 @@ func (apc *AdhocPlanCreate) sqlSave(ctx context.Context) (*AdhocPlan, error) { return nil, err } } + apc.mutation.id = &_node.ID + apc.mutation.done = true return _node, nil } func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { var ( _node = &AdhocPlan{config: apc.config} - _spec = &sqlgraph.CreateSpec{ - Table: adhocplan.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(adhocplan.Table, sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID)) ) if id, ok := apc.mutation.ID(); ok { _node.ID = id @@ -232,10 +195,7 @@ func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -251,10 +211,7 @@ func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -270,10 +227,7 @@ func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { Columns: []string{adhocplan.AdhocPlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -290,10 +244,7 @@ func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { Columns: []string{adhocplan.AdhocPlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -309,10 +260,7 @@ func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { Columns: []string{adhocplan.AdhocPlanToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -327,11 +275,15 @@ func (apc *AdhocPlanCreate) createSpec() (*AdhocPlan, *sqlgraph.CreateSpec) { // AdhocPlanCreateBulk is the builder for creating many AdhocPlan entities in bulk. type AdhocPlanCreateBulk struct { config + err error builders []*AdhocPlanCreate } // Save creates the AdhocPlan entities in the database. func (apcb *AdhocPlanCreateBulk) Save(ctx context.Context) ([]*AdhocPlan, error) { + if apcb.err != nil { + return nil, apcb.err + } specs := make([]*sqlgraph.CreateSpec, len(apcb.builders)) nodes := make([]*AdhocPlan, len(apcb.builders)) mutators := make([]Mutator, len(apcb.builders)) @@ -348,8 +300,8 @@ func (apcb *AdhocPlanCreateBulk) Save(ctx context.Context) ([]*AdhocPlan, error) return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, apcb.builders[i+1].mutation) } else { @@ -357,7 +309,7 @@ func (apcb *AdhocPlanCreateBulk) Save(ctx context.Context) ([]*AdhocPlan, error) // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, apcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/adhocplan_delete.go b/ent/adhocplan_delete.go index 920df1db..99affaac 100755 --- a/ent/adhocplan_delete.go +++ b/ent/adhocplan_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (apd *AdhocPlanDelete) Where(ps ...predicate.AdhocPlan) *AdhocPlanDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (apd *AdhocPlanDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(apd.hooks) == 0 { - affected, err = apd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AdhocPlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - apd.mutation = mutation - affected, err = apd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(apd.hooks) - 1; i >= 0; i-- { - if apd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = apd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, apd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, apd.sqlExec, apd.mutation, apd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
@@ -68,15 +40,7 @@ func (apd *AdhocPlanDelete) ExecX(ctx context.Context) int { } func (apd *AdhocPlanDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: adhocplan.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(adhocplan.Table, sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID)) if ps := apd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (apd *AdhocPlanDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, apd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, apd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + apd.mutation.done = true + return affected, err } // AdhocPlanDeleteOne is the builder for deleting a single AdhocPlan entity. @@ -92,6 +61,12 @@ type AdhocPlanDeleteOne struct { apd *AdhocPlanDelete } +// Where appends a list predicates to the AdhocPlanDelete builder. +func (apdo *AdhocPlanDeleteOne) Where(ps ...predicate.AdhocPlan) *AdhocPlanDeleteOne { + apdo.apd.mutation.Where(ps...) + return apdo +} + // Exec executes the deletion query. func (apdo *AdhocPlanDeleteOne) Exec(ctx context.Context) error { n, err := apdo.apd.Exec(ctx) @@ -107,5 +82,7 @@ func (apdo *AdhocPlanDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (apdo *AdhocPlanDeleteOne) ExecX(ctx context.Context) { - apdo.apd.ExecX(ctx) + if err := apdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/adhocplan_query.go b/ent/adhocplan_query.go index 19c9ab3b..517dac7d 100755 --- a/ent/adhocplan_query.go +++ b/ent/adhocplan_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -23,19 +22,20 @@ import ( // AdhocPlanQuery is the builder for querying AdhocPlan entities. type AdhocPlanQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.AdhocPlan - // eager-loading edges. + ctx *QueryContext + order []adhocplan.OrderOption + inters []Interceptor + predicates []predicate.AdhocPlan withPrevAdhocPlan *AdhocPlanQuery withNextAdhocPlan *AdhocPlanQuery withAdhocPlanToBuild *BuildQuery withAdhocPlanToStatus *StatusQuery withAdhocPlanToAgentTask *AgentTaskQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*AdhocPlan) error + withNamedPrevAdhocPlan map[string]*AdhocPlanQuery + withNamedNextAdhocPlan map[string]*AdhocPlanQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -47,34 +47,34 @@ func (apq *AdhocPlanQuery) Where(ps ...predicate.AdhocPlan) *AdhocPlanQuery { return apq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (apq *AdhocPlanQuery) Limit(limit int) *AdhocPlanQuery { - apq.limit = &limit + apq.ctx.Limit = &limit return apq } -// Offset adds an offset step to the query. +// Offset to start from. func (apq *AdhocPlanQuery) Offset(offset int) *AdhocPlanQuery { - apq.offset = &offset + apq.ctx.Offset = &offset return apq } // Unique configures the query builder to filter duplicate records on query. 
// By default, unique is set to true, and can be disabled using this method. func (apq *AdhocPlanQuery) Unique(unique bool) *AdhocPlanQuery { - apq.unique = &unique + apq.ctx.Unique = &unique return apq } -// Order adds an order step to the query. -func (apq *AdhocPlanQuery) Order(o ...OrderFunc) *AdhocPlanQuery { +// Order specifies how the records should be ordered. +func (apq *AdhocPlanQuery) Order(o ...adhocplan.OrderOption) *AdhocPlanQuery { apq.order = append(apq.order, o...) return apq } // QueryPrevAdhocPlan chains the current query on the "PrevAdhocPlan" edge. func (apq *AdhocPlanQuery) QueryPrevAdhocPlan() *AdhocPlanQuery { - query := &AdhocPlanQuery{config: apq.config} + query := (&AdhocPlanClient{config: apq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := apq.prepareQuery(ctx); err != nil { return nil, err @@ -96,7 +96,7 @@ func (apq *AdhocPlanQuery) QueryPrevAdhocPlan() *AdhocPlanQuery { // QueryNextAdhocPlan chains the current query on the "NextAdhocPlan" edge. func (apq *AdhocPlanQuery) QueryNextAdhocPlan() *AdhocPlanQuery { - query := &AdhocPlanQuery{config: apq.config} + query := (&AdhocPlanClient{config: apq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := apq.prepareQuery(ctx); err != nil { return nil, err @@ -118,7 +118,7 @@ func (apq *AdhocPlanQuery) QueryNextAdhocPlan() *AdhocPlanQuery { // QueryAdhocPlanToBuild chains the current query on the "AdhocPlanToBuild" edge. func (apq *AdhocPlanQuery) QueryAdhocPlanToBuild() *BuildQuery { - query := &BuildQuery{config: apq.config} + query := (&BuildClient{config: apq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := apq.prepareQuery(ctx); err != nil { return nil, err @@ -140,7 +140,7 @@ func (apq *AdhocPlanQuery) QueryAdhocPlanToBuild() *BuildQuery { // QueryAdhocPlanToStatus chains the current query on the "AdhocPlanToStatus" edge. func (apq *AdhocPlanQuery) QueryAdhocPlanToStatus() *StatusQuery { - query := &StatusQuery{config: apq.config} + query := (&StatusClient{config: apq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := apq.prepareQuery(ctx); err != nil { return nil, err @@ -162,7 +162,7 @@ func (apq *AdhocPlanQuery) QueryAdhocPlanToStatus() *StatusQuery { // QueryAdhocPlanToAgentTask chains the current query on the "AdhocPlanToAgentTask" edge. func (apq *AdhocPlanQuery) QueryAdhocPlanToAgentTask() *AgentTaskQuery { - query := &AgentTaskQuery{config: apq.config} + query := (&AgentTaskClient{config: apq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := apq.prepareQuery(ctx); err != nil { return nil, err @@ -185,7 +185,7 @@ func (apq *AdhocPlanQuery) QueryAdhocPlanToAgentTask() *AgentTaskQuery { // First returns the first AdhocPlan entity from the query. // Returns a *NotFoundError when no AdhocPlan was found. func (apq *AdhocPlanQuery) First(ctx context.Context) (*AdhocPlan, error) { - nodes, err := apq.Limit(1).All(ctx) + nodes, err := apq.Limit(1).All(setContextOp(ctx, apq.ctx, "First")) if err != nil { return nil, err } @@ -208,7 +208,7 @@ func (apq *AdhocPlanQuery) FirstX(ctx context.Context) *AdhocPlan { // Returns a *NotFoundError when no AdhocPlan ID was found. 
func (apq *AdhocPlanQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = apq.Limit(1).IDs(ctx); err != nil { + if ids, err = apq.Limit(1).IDs(setContextOp(ctx, apq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -231,7 +231,7 @@ func (apq *AdhocPlanQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one AdhocPlan entity is found. // Returns a *NotFoundError when no AdhocPlan entities are found. func (apq *AdhocPlanQuery) Only(ctx context.Context) (*AdhocPlan, error) { - nodes, err := apq.Limit(2).All(ctx) + nodes, err := apq.Limit(2).All(setContextOp(ctx, apq.ctx, "Only")) if err != nil { return nil, err } @@ -259,7 +259,7 @@ func (apq *AdhocPlanQuery) OnlyX(ctx context.Context) *AdhocPlan { // Returns a *NotFoundError when no entities are found. func (apq *AdhocPlanQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = apq.Limit(2).IDs(ctx); err != nil { + if ids, err = apq.Limit(2).IDs(setContextOp(ctx, apq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -284,10 +284,12 @@ func (apq *AdhocPlanQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of AdhocPlans. func (apq *AdhocPlanQuery) All(ctx context.Context) ([]*AdhocPlan, error) { + ctx = setContextOp(ctx, apq.ctx, "All") if err := apq.prepareQuery(ctx); err != nil { return nil, err } - return apq.sqlAll(ctx) + qr := querierAll[[]*AdhocPlan, *AdhocPlanQuery]() + return withInterceptors[[]*AdhocPlan](ctx, apq, qr, apq.inters) } // AllX is like All, but panics if an error occurs. @@ -300,9 +302,12 @@ func (apq *AdhocPlanQuery) AllX(ctx context.Context) []*AdhocPlan { } // IDs executes the query and returns a list of AdhocPlan IDs. -func (apq *AdhocPlanQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := apq.Select(adhocplan.FieldID).Scan(ctx, &ids); err != nil { +func (apq *AdhocPlanQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if apq.ctx.Unique == nil && apq.path != nil { + apq.Unique(true) + } + ctx = setContextOp(ctx, apq.ctx, "IDs") + if err = apq.Select(adhocplan.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -319,10 +324,11 @@ func (apq *AdhocPlanQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (apq *AdhocPlanQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, apq.ctx, "Count") if err := apq.prepareQuery(ctx); err != nil { return 0, err } - return apq.sqlCount(ctx) + return withInterceptors[int](ctx, apq, querierCount[*AdhocPlanQuery](), apq.inters) } // CountX is like Count, but panics if an error occurs. @@ -336,10 +342,15 @@ func (apq *AdhocPlanQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (apq *AdhocPlanQuery) Exist(ctx context.Context) (bool, error) { - if err := apq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, apq.ctx, "Exist") + switch _, err := apq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return apq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -359,9 +370,9 @@ func (apq *AdhocPlanQuery) Clone() *AdhocPlanQuery { } return &AdhocPlanQuery{ config: apq.config, - limit: apq.limit, - offset: apq.offset, - order: append([]OrderFunc{}, apq.order...), + ctx: apq.ctx.Clone(), + order: append([]adhocplan.OrderOption{}, apq.order...), + inters: append([]Interceptor{}, apq.inters...), predicates: append([]predicate.AdhocPlan{}, apq.predicates...), withPrevAdhocPlan: apq.withPrevAdhocPlan.Clone(), withNextAdhocPlan: apq.withNextAdhocPlan.Clone(), @@ -369,16 +380,15 @@ func (apq *AdhocPlanQuery) Clone() *AdhocPlanQuery { withAdhocPlanToStatus: apq.withAdhocPlanToStatus.Clone(), withAdhocPlanToAgentTask: apq.withAdhocPlanToAgentTask.Clone(), // clone intermediate query. - sql: apq.sql.Clone(), - path: apq.path, - unique: apq.unique, + sql: apq.sql.Clone(), + path: apq.path, } } // WithPrevAdhocPlan tells the query-builder to eager-load the nodes that are connected to // the "PrevAdhocPlan" edge. The optional arguments are used to configure the query builder of the edge. func (apq *AdhocPlanQuery) WithPrevAdhocPlan(opts ...func(*AdhocPlanQuery)) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: apq.config} + query := (&AdhocPlanClient{config: apq.config}).Query() for _, opt := range opts { opt(query) } @@ -389,7 +399,7 @@ func (apq *AdhocPlanQuery) WithPrevAdhocPlan(opts ...func(*AdhocPlanQuery)) *Adh // WithNextAdhocPlan tells the query-builder to eager-load the nodes that are connected to // the "NextAdhocPlan" edge. The optional arguments are used to configure the query builder of the edge. func (apq *AdhocPlanQuery) WithNextAdhocPlan(opts ...func(*AdhocPlanQuery)) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: apq.config} + query := (&AdhocPlanClient{config: apq.config}).Query() for _, opt := range opts { opt(query) } @@ -400,7 +410,7 @@ func (apq *AdhocPlanQuery) WithNextAdhocPlan(opts ...func(*AdhocPlanQuery)) *Adh // WithAdhocPlanToBuild tells the query-builder to eager-load the nodes that are connected to // the "AdhocPlanToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (apq *AdhocPlanQuery) WithAdhocPlanToBuild(opts ...func(*BuildQuery)) *AdhocPlanQuery { - query := &BuildQuery{config: apq.config} + query := (&BuildClient{config: apq.config}).Query() for _, opt := range opts { opt(query) } @@ -411,7 +421,7 @@ func (apq *AdhocPlanQuery) WithAdhocPlanToBuild(opts ...func(*BuildQuery)) *Adho // WithAdhocPlanToStatus tells the query-builder to eager-load the nodes that are connected to // the "AdhocPlanToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (apq *AdhocPlanQuery) WithAdhocPlanToStatus(opts ...func(*StatusQuery)) *AdhocPlanQuery { - query := &StatusQuery{config: apq.config} + query := (&StatusClient{config: apq.config}).Query() for _, opt := range opts { opt(query) } @@ -422,7 +432,7 @@ func (apq *AdhocPlanQuery) WithAdhocPlanToStatus(opts ...func(*StatusQuery)) *Ad // WithAdhocPlanToAgentTask tells the query-builder to eager-load the nodes that are connected to // the "AdhocPlanToAgentTask" edge. The optional arguments are used to configure the query builder of the edge. 
func (apq *AdhocPlanQuery) WithAdhocPlanToAgentTask(opts ...func(*AgentTaskQuery)) *AdhocPlanQuery { - query := &AgentTaskQuery{config: apq.config} + query := (&AgentTaskClient{config: apq.config}).Query() for _, opt := range opts { opt(query) } @@ -433,26 +443,41 @@ func (apq *AdhocPlanQuery) WithAdhocPlanToAgentTask(opts ...func(*AgentTaskQuery // GroupBy is used to group vertices by one or more fields/columns. // It is often used with aggregate functions, like: count, max, mean, min, sum. func (apq *AdhocPlanQuery) GroupBy(field string, fields ...string) *AdhocPlanGroupBy { - group := &AdhocPlanGroupBy{config: apq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := apq.prepareQuery(ctx); err != nil { - return nil, err - } - return apq.sqlQuery(ctx), nil - } - return group + apq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AdhocPlanGroupBy{build: apq} + grbuild.flds = &apq.ctx.Fields + grbuild.label = adhocplan.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, // instead of selecting all fields in the entity. func (apq *AdhocPlanQuery) Select(fields ...string) *AdhocPlanSelect { - apq.fields = append(apq.fields, fields...) - return &AdhocPlanSelect{AdhocPlanQuery: apq} + apq.ctx.Fields = append(apq.ctx.Fields, fields...) + sbuild := &AdhocPlanSelect{AdhocPlanQuery: apq} + sbuild.label = adhocplan.Label + sbuild.flds, sbuild.scan = &apq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AdhocPlanSelect configured with the given aggregations. +func (apq *AdhocPlanQuery) Aggregate(fns ...AggregateFunc) *AdhocPlanSelect { + return apq.Select().Aggregate(fns...) } func (apq *AdhocPlanQuery) prepareQuery(ctx context.Context) error { - for _, f := range apq.fields { + for _, inter := range apq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, apq); err != nil { + return err + } + } + } + for _, f := range apq.ctx.Fields { if !adhocplan.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -467,7 +492,7 @@ func (apq *AdhocPlanQuery) prepareQuery(ctx context.Context) error { return nil } -func (apq *AdhocPlanQuery) sqlAll(ctx context.Context) ([]*AdhocPlan, error) { +func (apq *AdhocPlanQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AdhocPlan, error) { var ( nodes = []*AdhocPlan{} withFKs = apq.withFKs @@ -486,279 +511,317 @@ func (apq *AdhocPlanQuery) sqlAll(ctx context.Context) ([]*AdhocPlan, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, adhocplan.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AdhocPlan).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &AdhocPlan{config: apq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(apq.modifiers) > 0 { + _spec.Modifiers = apq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, apq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := apq.withPrevAdhocPlan; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*AdhocPlan, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.PrevAdhocPlan = []*AdhocPlan{} - } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*AdhocPlan) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: adhocplan.PrevAdhocPlanTable, - Columns: adhocplan.PrevAdhocPlanPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(adhocplan.PrevAdhocPlanPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, - } - if err := sqlgraph.QueryEdges(ctx, apq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "PrevAdhocPlan": %w`, err) + if err := apq.loadPrevAdhocPlan(ctx, query, nodes, + func(n *AdhocPlan) { n.Edges.PrevAdhocPlan = []*AdhocPlan{} }, + func(n *AdhocPlan, e *AdhocPlan) { n.Edges.PrevAdhocPlan = append(n.Edges.PrevAdhocPlan, e) }); err != nil { + return nil, err } - query.Where(adhocplan.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := apq.withNextAdhocPlan; query != nil { + if err := apq.loadNextAdhocPlan(ctx, query, nodes, + func(n *AdhocPlan) { n.Edges.NextAdhocPlan = []*AdhocPlan{} }, + func(n *AdhocPlan, e *AdhocPlan) { n.Edges.NextAdhocPlan = append(n.Edges.NextAdhocPlan, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "PrevAdhocPlan" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.PrevAdhocPlan = append(nodes[i].Edges.PrevAdhocPlan, n) - } + } + if query := apq.withAdhocPlanToBuild; query != nil { + if err := apq.loadAdhocPlanToBuild(ctx, query, nodes, nil, + func(n *AdhocPlan, e *Build) { n.Edges.AdhocPlanToBuild = e }); err != nil { + return nil, err } } - - if query := apq.withNextAdhocPlan; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := 
make(map[uuid.UUID]*AdhocPlan, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.NextAdhocPlan = []*AdhocPlan{} + if query := apq.withAdhocPlanToStatus; query != nil { + if err := apq.loadAdhocPlanToStatus(ctx, query, nodes, nil, + func(n *AdhocPlan, e *Status) { n.Edges.AdhocPlanToStatus = e }); err != nil { + return nil, err } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*AdhocPlan) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: adhocplan.NextAdhocPlanTable, - Columns: adhocplan.NextAdhocPlanPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(adhocplan.NextAdhocPlanPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + if query := apq.withAdhocPlanToAgentTask; query != nil { + if err := apq.loadAdhocPlanToAgentTask(ctx, query, nodes, nil, + func(n *AdhocPlan, e *AgentTask) { n.Edges.AdhocPlanToAgentTask = e }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, apq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "NextAdhocPlan": %w`, err) + } + for name, query := range apq.withNamedPrevAdhocPlan { + if err := apq.loadPrevAdhocPlan(ctx, query, nodes, + func(n *AdhocPlan) { n.appendNamedPrevAdhocPlan(name) }, + func(n *AdhocPlan, e *AdhocPlan) { n.appendNamedPrevAdhocPlan(name, e) }); err != nil { + return nil, err } - query.Where(adhocplan.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range apq.withNamedNextAdhocPlan { + if err := apq.loadNextAdhocPlan(ctx, query, nodes, + func(n *AdhocPlan) { n.appendNamedNextAdhocPlan(name) }, + func(n *AdhocPlan, e *AdhocPlan) { n.appendNamedNextAdhocPlan(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "NextAdhocPlan" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.NextAdhocPlan = append(nodes[i].Edges.NextAdhocPlan, n) - } + } + for i := range apq.loadTotal { + if err := apq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := apq.withAdhocPlanToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AdhocPlan) - for i := range nodes { - if nodes[i].adhoc_plan_adhoc_plan_to_build == nil { - continue +func (apq *AdhocPlanQuery) loadPrevAdhocPlan(ctx context.Context, query *AdhocPlanQuery, nodes []*AdhocPlan, init func(*AdhocPlan), assign func(*AdhocPlan, *AdhocPlan)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*AdhocPlan) + nids := make(map[uuid.UUID]map[*AdhocPlan]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := 
sql.Table(adhocplan.PrevAdhocPlanTable) + s.Join(joinT).On(s.C(adhocplan.FieldID), joinT.C(adhocplan.PrevAdhocPlanPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(adhocplan.PrevAdhocPlanPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(adhocplan.PrevAdhocPlanPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - fk := *nodes[i].adhoc_plan_adhoc_plan_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*AdhocPlan]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } - nodeids[fk] = append(nodeids[fk], nodes[i]) + }) + }) + neighbors, err := withInterceptors[[]*AdhocPlan](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "PrevAdhocPlan" node returned %v`, n.ID) } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + for kn := range nodes { + assign(kn, n) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_build" returned %v`, n.ID) + } + return nil +} +func (apq *AdhocPlanQuery) loadNextAdhocPlan(ctx context.Context, query *AdhocPlanQuery, nodes []*AdhocPlan, init func(*AdhocPlan), assign func(*AdhocPlan, *AdhocPlan)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*AdhocPlan) + nids := make(map[uuid.UUID]map[*AdhocPlan]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(adhocplan.NextAdhocPlanTable) + s.Join(joinT).On(s.C(adhocplan.FieldID), joinT.C(adhocplan.NextAdhocPlanPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(adhocplan.NextAdhocPlanPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(adhocplan.NextAdhocPlanPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - for i := range nodes { - nodes[i].Edges.AdhocPlanToBuild = n + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*AdhocPlan]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } + }) + }) + neighbors, err := withInterceptors[[]*AdhocPlan](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "NextAdhocPlan" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - if query := apq.withAdhocPlanToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*AdhocPlan) + return nil +} +func (apq *AdhocPlanQuery) loadAdhocPlanToBuild(ctx context.Context, query *BuildQuery, nodes []*AdhocPlan, init func(*AdhocPlan), assign func(*AdhocPlan, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AdhocPlan) + for i := range nodes { + if nodes[i].adhoc_plan_adhoc_plan_to_build == nil { + continue + } + fk := *nodes[i].adhoc_plan_adhoc_plan_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_build" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(adhocplan.AdhocPlanToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (apq *AdhocPlanQuery) loadAdhocPlanToStatus(ctx context.Context, query *StatusQuery, nodes []*AdhocPlan, init func(*AdhocPlan), assign func(*AdhocPlan, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*AdhocPlan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(adhocplan.AdhocPlanToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.adhoc_plan_adhoc_plan_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "adhoc_plan_adhoc_plan_to_status" is nil for node %v`, n.ID) } - for _, n := range neighbors { - fk := n.adhoc_plan_adhoc_plan_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "adhoc_plan_adhoc_plan_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if 
!ok { - return nil, fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.AdhocPlanToStatus = n + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "adhoc_plan_adhoc_plan_to_status" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - if query := apq.withAdhocPlanToAgentTask; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AdhocPlan) - for i := range nodes { - if nodes[i].adhoc_plan_adhoc_plan_to_agent_task == nil { - continue - } - fk := *nodes[i].adhoc_plan_adhoc_plan_to_agent_task - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + return nil +} +func (apq *AdhocPlanQuery) loadAdhocPlanToAgentTask(ctx context.Context, query *AgentTaskQuery, nodes []*AdhocPlan, init func(*AdhocPlan), assign func(*AdhocPlan, *AgentTask)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AdhocPlan) + for i := range nodes { + if nodes[i].adhoc_plan_adhoc_plan_to_agent_task == nil { + continue } - query.Where(agenttask.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + fk := *nodes[i].adhoc_plan_adhoc_plan_to_agent_task + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_agent_task" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AdhocPlanToAgentTask = n - } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(agenttask.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_agent_task" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) } } - - return nodes, nil + return nil } func (apq *AdhocPlanQuery) sqlCount(ctx context.Context) (int, error) { _spec := apq.querySpec() - _spec.Node.Columns = apq.fields - if len(apq.fields) > 0 { - _spec.Unique = apq.unique != nil && *apq.unique + if len(apq.modifiers) > 0 { + _spec.Modifiers = apq.modifiers } - return sqlgraph.CountNodes(ctx, apq.driver, _spec) -} - -func (apq *AdhocPlanQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := apq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = apq.ctx.Fields + if len(apq.ctx.Fields) > 0 { + _spec.Unique = apq.ctx.Unique != nil && *apq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, apq.driver, _spec) } func (apq *AdhocPlanQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: adhocplan.Table, - Columns: adhocplan.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, - }, - From: apq.sql, - Unique: true, - } - if unique := apq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(adhocplan.Table, adhocplan.Columns, sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID)) + _spec.From = apq.sql + if unique := apq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if apq.path != nil { + _spec.Unique = true } - if fields := apq.fields; len(fields) > 0 { + if fields := apq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 
0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, adhocplan.FieldID) for i := range fields { @@ -774,10 +837,10 @@ func (apq *AdhocPlanQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := apq.limit; limit != nil { + if limit := apq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := apq.offset; offset != nil { + if offset := apq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := apq.order; len(ps) > 0 { @@ -793,7 +856,7 @@ func (apq *AdhocPlanQuery) querySpec() *sqlgraph.QuerySpec { func (apq *AdhocPlanQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(apq.driver.Dialect()) t1 := builder.Table(adhocplan.Table) - columns := apq.fields + columns := apq.ctx.Fields if len(columns) == 0 { columns = adhocplan.Columns } @@ -802,7 +865,7 @@ func (apq *AdhocPlanQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = apq.sql selector.Select(selector.Columns(columns...)...) } - if apq.unique != nil && *apq.unique { + if apq.ctx.Unique != nil && *apq.ctx.Unique { selector.Distinct() } for _, p := range apq.predicates { @@ -811,498 +874,128 @@ func (apq *AdhocPlanQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range apq.order { p(selector) } - if offset := apq.offset; offset != nil { + if offset := apq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := apq.limit; limit != nil { + if limit := apq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// AdhocPlanGroupBy is the group-by builder for AdhocPlan entities. -type AdhocPlanGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (apgb *AdhocPlanGroupBy) Aggregate(fns ...AggregateFunc) *AdhocPlanGroupBy { - apgb.fns = append(apgb.fns, fns...) - return apgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (apgb *AdhocPlanGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := apgb.path(ctx) - if err != nil { - return err - } - apgb.sql = query - return apgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := apgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (apgb *AdhocPlanGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(apgb.fields) > 1 { - return nil, errors.New("ent: AdhocPlanGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := apgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) StringsX(ctx context.Context) []string { - v, err := apgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (apgb *AdhocPlanGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = apgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) StringX(ctx context.Context) string { - v, err := apgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (apgb *AdhocPlanGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(apgb.fields) > 1 { - return nil, errors.New("ent: AdhocPlanGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := apgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) IntsX(ctx context.Context) []int { - v, err := apgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (apgb *AdhocPlanGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = apgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) IntX(ctx context.Context) int { - v, err := apgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (apgb *AdhocPlanGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(apgb.fields) > 1 { - return nil, errors.New("ent: AdhocPlanGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := apgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedPrevAdhocPlan tells the query-builder to eager-load the nodes that are connected to the "PrevAdhocPlan" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (apq *AdhocPlanQuery) WithNamedPrevAdhocPlan(name string, opts ...func(*AdhocPlanQuery)) *AdhocPlanQuery { + query := (&AdhocPlanClient{config: apq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := apgb.Float64s(ctx) - if err != nil { - panic(err) + if apq.withNamedPrevAdhocPlan == nil { + apq.withNamedPrevAdhocPlan = make(map[string]*AdhocPlanQuery) } - return v + apq.withNamedPrevAdhocPlan[name] = query + return apq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (apgb *AdhocPlanGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = apgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedNextAdhocPlan tells the query-builder to eager-load the nodes that are connected to the "NextAdhocPlan" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (apq *AdhocPlanQuery) WithNamedNextAdhocPlan(name string, opts ...func(*AdhocPlanQuery)) *AdhocPlanQuery { + query := (&AdhocPlanClient{config: apq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) Float64X(ctx context.Context) float64 { - v, err := apgb.Float64(ctx) - if err != nil { - panic(err) + if apq.withNamedNextAdhocPlan == nil { + apq.withNamedNextAdhocPlan = make(map[string]*AdhocPlanQuery) } - return v + apq.withNamedNextAdhocPlan[name] = query + return apq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (apgb *AdhocPlanGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(apgb.fields) > 1 { - return nil, errors.New("ent: AdhocPlanGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := apgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// AdhocPlanGroupBy is the group-by builder for AdhocPlan entities. +type AdhocPlanGroupBy struct { + selector + build *AdhocPlanQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (apgb *AdhocPlanGroupBy) BoolsX(ctx context.Context) []bool { - v, err := apgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (apgb *AdhocPlanGroupBy) Aggregate(fns ...AggregateFunc) *AdhocPlanGroupBy { + apgb.fns = append(apgb.fns, fns...) + return apgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (apgb *AdhocPlanGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = apgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (apgb *AdhocPlanGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, apgb.build.ctx, "GroupBy") + if err := apgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*AdhocPlanQuery, *AdhocPlanGroupBy](ctx, apgb.build, apgb, apgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. 
-func (apgb *AdhocPlanGroupBy) BoolX(ctx context.Context) bool { - v, err := apgb.Bool(ctx) - if err != nil { - panic(err) +func (apgb *AdhocPlanGroupBy) sqlScan(ctx context.Context, root *AdhocPlanQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(apgb.fns)) + for _, fn := range apgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (apgb *AdhocPlanGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range apgb.fields { - if !adhocplan.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*apgb.flds)+len(apgb.fns)) + for _, f := range *apgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := apgb.sqlQuery() + selector.GroupBy(selector.Columns(*apgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := apgb.driver.Query(ctx, query, args, rows); err != nil { + if err := apgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (apgb *AdhocPlanGroupBy) sqlQuery() *sql.Selector { - selector := apgb.sql.Select() - aggregation := make([]string, 0, len(apgb.fns)) - for _, fn := range apgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(apgb.fields)+len(apgb.fns)) - for _, f := range apgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(apgb.fields...)...) -} - // AdhocPlanSelect is the builder for selecting fields of AdhocPlan entities. type AdhocPlanSelect struct { *AdhocPlanQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (aps *AdhocPlanSelect) Aggregate(fns ...AggregateFunc) *AdhocPlanSelect { + aps.fns = append(aps.fns, fns...) + return aps } // Scan applies the selector query and scans the result into the given value. -func (aps *AdhocPlanSelect) Scan(ctx context.Context, v interface{}) error { +func (aps *AdhocPlanSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, aps.ctx, "Select") if err := aps.prepareQuery(ctx); err != nil { return err } - aps.sql = aps.AdhocPlanQuery.sqlQuery(ctx) - return aps.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (aps *AdhocPlanSelect) ScanX(ctx context.Context, v interface{}) { - if err := aps.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. 
-func (aps *AdhocPlanSelect) Strings(ctx context.Context) ([]string, error) { - if len(aps.fields) > 1 { - return nil, errors.New("ent: AdhocPlanSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := aps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*AdhocPlanQuery, *AdhocPlanSelect](ctx, aps.AdhocPlanQuery, aps, aps.inters, v) } -// StringsX is like Strings, but panics if an error occurs. -func (aps *AdhocPlanSelect) StringsX(ctx context.Context) []string { - v, err := aps.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (aps *AdhocPlanSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = aps.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (aps *AdhocPlanSelect) StringX(ctx context.Context) string { - v, err := aps.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (aps *AdhocPlanSelect) Ints(ctx context.Context) ([]int, error) { - if len(aps.fields) > 1 { - return nil, errors.New("ent: AdhocPlanSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := aps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (aps *AdhocPlanSelect) IntsX(ctx context.Context) []int { - v, err := aps.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (aps *AdhocPlanSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = aps.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (aps *AdhocPlanSelect) IntX(ctx context.Context) int { - v, err := aps.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (aps *AdhocPlanSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(aps.fields) > 1 { - return nil, errors.New("ent: AdhocPlanSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := aps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (aps *AdhocPlanSelect) Float64sX(ctx context.Context) []float64 { - v, err := aps.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (aps *AdhocPlanSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = aps.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (aps *AdhocPlanSelect) Float64X(ctx context.Context) float64 { - v, err := aps.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (aps *AdhocPlanSelect) Bools(ctx context.Context) ([]bool, error) { - if len(aps.fields) > 1 { - return nil, errors.New("ent: AdhocPlanSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := aps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (aps *AdhocPlanSelect) BoolsX(ctx context.Context) []bool { - v, err := aps.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (aps *AdhocPlanSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = aps.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{adhocplan.Label} - default: - err = fmt.Errorf("ent: AdhocPlanSelect.Bools returned %d results when one was expected", len(v)) +func (aps *AdhocPlanSelect) sqlScan(ctx context.Context, root *AdhocPlanQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(aps.fns)) + for _, fn := range aps.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (aps *AdhocPlanSelect) BoolX(ctx context.Context) bool { - v, err := aps.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*aps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (aps *AdhocPlanSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := aps.sql.Query() + query, args := selector.Query() if err := aps.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/adhocplan_update.go b/ent/adhocplan_update.go index eee1c7c4..1d43fdb4 100755 --- a/ent/adhocplan_update.go +++ b/ent/adhocplan_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -161,40 +161,7 @@ func (apu *AdhocPlanUpdate) ClearAdhocPlanToAgentTask() *AdhocPlanUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (apu *AdhocPlanUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(apu.hooks) == 0 { - if err = apu.check(); err != nil { - return 0, err - } - affected, err = apu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AdhocPlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = apu.check(); err != nil { - return 0, err - } - apu.mutation = mutation - affected, err = apu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(apu.hooks) - 1; i >= 0; i-- { - if apu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = apu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, apu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, apu.sqlSave, apu.mutation, apu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -234,16 +201,10 @@ func (apu *AdhocPlanUpdate) check() error { } func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: adhocplan.Table, - Columns: adhocplan.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, - }, + if err := apu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(adhocplan.Table, adhocplan.Columns, sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID)) if ps := apu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -259,10 +220,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -275,10 +233,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -294,10 +249,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -313,10 +265,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -329,10 +278,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, 
field.TypeUUID), }, } for _, k := range nodes { @@ -348,10 +294,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -367,10 +310,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{adhocplan.AdhocPlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -383,10 +323,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{adhocplan.AdhocPlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -402,10 +339,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{adhocplan.AdhocPlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -418,10 +352,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{adhocplan.AdhocPlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -437,10 +368,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{adhocplan.AdhocPlanToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -453,10 +381,7 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{adhocplan.AdhocPlanToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -468,10 +393,11 @@ func (apu *AdhocPlanUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{adhocplan.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + apu.mutation.done = true return n, nil } @@ -611,6 +537,12 @@ func (apuo *AdhocPlanUpdateOne) ClearAdhocPlanToAgentTask() *AdhocPlanUpdateOne return apuo } +// Where appends a list predicates to the AdhocPlanUpdate builder. +func (apuo *AdhocPlanUpdateOne) Where(ps ...predicate.AdhocPlan) *AdhocPlanUpdateOne { + apuo.mutation.Where(ps...) 
+ return apuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (apuo *AdhocPlanUpdateOne) Select(field string, fields ...string) *AdhocPlanUpdateOne { @@ -620,40 +552,7 @@ func (apuo *AdhocPlanUpdateOne) Select(field string, fields ...string) *AdhocPla // Save executes the query and returns the updated AdhocPlan entity. func (apuo *AdhocPlanUpdateOne) Save(ctx context.Context) (*AdhocPlan, error) { - var ( - err error - node *AdhocPlan - ) - if len(apuo.hooks) == 0 { - if err = apuo.check(); err != nil { - return nil, err - } - node, err = apuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AdhocPlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = apuo.check(); err != nil { - return nil, err - } - apuo.mutation = mutation - node, err = apuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(apuo.hooks) - 1; i >= 0; i-- { - if apuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = apuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, apuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, apuo.sqlSave, apuo.mutation, apuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -693,16 +592,10 @@ func (apuo *AdhocPlanUpdateOne) check() error { } func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: adhocplan.Table, - Columns: adhocplan.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, - }, + if err := apuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(adhocplan.Table, adhocplan.Columns, sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID)) id, ok := apuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AdhocPlan.id" for update`)} @@ -735,10 +628,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -751,10 +641,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -770,10 +657,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: adhocplan.PrevAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -789,10 +673,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -805,10 +686,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -824,10 +702,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: adhocplan.NextAdhocPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -843,10 +718,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: []string{adhocplan.AdhocPlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -859,10 +731,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: []string{adhocplan.AdhocPlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -878,10 +747,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: []string{adhocplan.AdhocPlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -894,10 +760,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: []string{adhocplan.AdhocPlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -913,10 +776,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: []string{adhocplan.AdhocPlanToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -929,10 +789,7 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, Columns: []string{adhocplan.AdhocPlanToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -947,9 +804,10 @@ func (apuo *AdhocPlanUpdateOne) sqlSave(ctx context.Context) (_node *AdhocPlan, if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{adhocplan.Label} } else if sqlgraph.IsConstraintError(err) { - err = 
&ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + apuo.mutation.done = true return _node, nil } diff --git a/ent/agentstatus.go b/ent/agentstatus.go index a646c9e9..f49f44ea 100755 --- a/ent/agentstatus.go +++ b/ent/agentstatus.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/agentstatus" "github.com/gen0cide/laforge/ent/build" @@ -51,6 +52,7 @@ type AgentStatus struct { // The values are being populated by the AgentStatusQuery when eager-loading is set. Edges AgentStatusEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // AgentStatusToProvisionedHost holds the value of the AgentStatusToProvisionedHost edge. HCLAgentStatusToProvisionedHost *ProvisionedHost `json:"AgentStatusToProvisionedHost,omitempty"` @@ -58,10 +60,11 @@ type AgentStatus struct { HCLAgentStatusToProvisionedNetwork *ProvisionedNetwork `json:"AgentStatusToProvisionedNetwork,omitempty"` // AgentStatusToBuild holds the value of the AgentStatusToBuild edge. HCLAgentStatusToBuild *Build `json:"AgentStatusToBuild,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ agent_status_agent_status_to_provisioned_host *uuid.UUID agent_status_agent_status_to_provisioned_network *uuid.UUID agent_status_agent_status_to_build *uuid.UUID + selectValues sql.SelectValues } // AgentStatusEdges holds the relations/edges for other nodes in the graph. @@ -75,6 +78,8 @@ type AgentStatusEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [3]bool + // totalCount holds the count of the edges above. + totalCount [3]map[string]int } // AgentStatusToProvisionedHostOrErr returns the AgentStatusToProvisionedHost value or an error if the edge @@ -82,8 +87,7 @@ type AgentStatusEdges struct { func (e AgentStatusEdges) AgentStatusToProvisionedHostOrErr() (*ProvisionedHost, error) { if e.loadedTypes[0] { if e.AgentStatusToProvisionedHost == nil { - // The edge AgentStatusToProvisionedHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionedhost.Label} } return e.AgentStatusToProvisionedHost, nil @@ -96,8 +100,7 @@ func (e AgentStatusEdges) AgentStatusToProvisionedHostOrErr() (*ProvisionedHost, func (e AgentStatusEdges) AgentStatusToProvisionedNetworkOrErr() (*ProvisionedNetwork, error) { if e.loadedTypes[1] { if e.AgentStatusToProvisionedNetwork == nil { - // The edge AgentStatusToProvisionedNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionednetwork.Label} } return e.AgentStatusToProvisionedNetwork, nil @@ -110,8 +113,7 @@ func (e AgentStatusEdges) AgentStatusToProvisionedNetworkOrErr() (*ProvisionedNe func (e AgentStatusEdges) AgentStatusToBuildOrErr() (*Build, error) { if e.loadedTypes[2] { if e.AgentStatusToBuild == nil { - // The edge AgentStatusToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.AgentStatusToBuild, nil @@ -120,8 +122,8 @@ func (e AgentStatusEdges) AgentStatusToBuildOrErr() (*Build, error) { } // scanValues returns the types for scanning values from sql.Rows. 
-func (*AgentStatus) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*AgentStatus) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case agentstatus.FieldLoad1, agentstatus.FieldLoad5, agentstatus.FieldLoad15: @@ -139,7 +141,7 @@ func (*AgentStatus) scanValues(columns []string) ([]interface{}, error) { case agentstatus.ForeignKeys[2]: // agent_status_agent_status_to_build values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type AgentStatus", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -147,7 +149,7 @@ func (*AgentStatus) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the AgentStatus fields. -func (as *AgentStatus) assignValues(columns []string, values []interface{}) error { +func (as *AgentStatus) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -264,41 +266,49 @@ func (as *AgentStatus) assignValues(columns []string, values []interface{}) erro as.agent_status_agent_status_to_build = new(uuid.UUID) *as.agent_status_agent_status_to_build = *value.S.(*uuid.UUID) } + default: + as.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the AgentStatus. +// This includes values selected through modifiers, order, etc. +func (as *AgentStatus) Value(name string) (ent.Value, error) { + return as.selectValues.Get(name) +} + // QueryAgentStatusToProvisionedHost queries the "AgentStatusToProvisionedHost" edge of the AgentStatus entity. func (as *AgentStatus) QueryAgentStatusToProvisionedHost() *ProvisionedHostQuery { - return (&AgentStatusClient{config: as.config}).QueryAgentStatusToProvisionedHost(as) + return NewAgentStatusClient(as.config).QueryAgentStatusToProvisionedHost(as) } // QueryAgentStatusToProvisionedNetwork queries the "AgentStatusToProvisionedNetwork" edge of the AgentStatus entity. func (as *AgentStatus) QueryAgentStatusToProvisionedNetwork() *ProvisionedNetworkQuery { - return (&AgentStatusClient{config: as.config}).QueryAgentStatusToProvisionedNetwork(as) + return NewAgentStatusClient(as.config).QueryAgentStatusToProvisionedNetwork(as) } // QueryAgentStatusToBuild queries the "AgentStatusToBuild" edge of the AgentStatus entity. func (as *AgentStatus) QueryAgentStatusToBuild() *BuildQuery { - return (&AgentStatusClient{config: as.config}).QueryAgentStatusToBuild(as) + return NewAgentStatusClient(as.config).QueryAgentStatusToBuild(as) } // Update returns a builder for updating this AgentStatus. // Note that you need to call AgentStatus.Unwrap() before calling this method if this AgentStatus // was returned from a transaction, and the transaction was committed or rolled back. func (as *AgentStatus) Update() *AgentStatusUpdateOne { - return (&AgentStatusClient{config: as.config}).UpdateOne(as) + return NewAgentStatusClient(as.config).UpdateOne(as) } // Unwrap unwraps the AgentStatus entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (as *AgentStatus) Unwrap() *AgentStatus { - tx, ok := as.config.driver.(*txDriver) + _tx, ok := as.config.driver.(*txDriver) if !ok { panic("ent: AgentStatus is not a transactional entity") } - as.config.driver = tx.drv + as.config.driver = _tx.drv return as } @@ -306,34 +316,47 @@ func (as *AgentStatus) Unwrap() *AgentStatus { func (as *AgentStatus) String() string { var builder strings.Builder builder.WriteString("AgentStatus(") - builder.WriteString(fmt.Sprintf("id=%v", as.ID)) - builder.WriteString(", ClientID=") + builder.WriteString(fmt.Sprintf("id=%v, ", as.ID)) + builder.WriteString("ClientID=") builder.WriteString(as.ClientID) - builder.WriteString(", Hostname=") + builder.WriteString(", ") + builder.WriteString("Hostname=") builder.WriteString(as.Hostname) - builder.WriteString(", UpTime=") + builder.WriteString(", ") + builder.WriteString("UpTime=") builder.WriteString(fmt.Sprintf("%v", as.UpTime)) - builder.WriteString(", BootTime=") + builder.WriteString(", ") + builder.WriteString("BootTime=") builder.WriteString(fmt.Sprintf("%v", as.BootTime)) - builder.WriteString(", NumProcs=") + builder.WriteString(", ") + builder.WriteString("NumProcs=") builder.WriteString(fmt.Sprintf("%v", as.NumProcs)) - builder.WriteString(", Os=") + builder.WriteString(", ") + builder.WriteString("Os=") builder.WriteString(as.Os) - builder.WriteString(", HostID=") + builder.WriteString(", ") + builder.WriteString("HostID=") builder.WriteString(as.HostID) - builder.WriteString(", Load1=") + builder.WriteString(", ") + builder.WriteString("Load1=") builder.WriteString(fmt.Sprintf("%v", as.Load1)) - builder.WriteString(", Load5=") + builder.WriteString(", ") + builder.WriteString("Load5=") builder.WriteString(fmt.Sprintf("%v", as.Load5)) - builder.WriteString(", Load15=") + builder.WriteString(", ") + builder.WriteString("Load15=") builder.WriteString(fmt.Sprintf("%v", as.Load15)) - builder.WriteString(", TotalMem=") + builder.WriteString(", ") + builder.WriteString("TotalMem=") builder.WriteString(fmt.Sprintf("%v", as.TotalMem)) - builder.WriteString(", FreeMem=") + builder.WriteString(", ") + builder.WriteString("FreeMem=") builder.WriteString(fmt.Sprintf("%v", as.FreeMem)) - builder.WriteString(", UsedMem=") + builder.WriteString(", ") + builder.WriteString("UsedMem=") builder.WriteString(fmt.Sprintf("%v", as.UsedMem)) - builder.WriteString(", Timestamp=") + builder.WriteString(", ") + builder.WriteString("Timestamp=") builder.WriteString(fmt.Sprintf("%v", as.Timestamp)) builder.WriteByte(')') return builder.String() @@ -341,9 +364,3 @@ func (as *AgentStatus) String() string { // AgentStatusSlice is a parsable slice of AgentStatus. type AgentStatusSlice []*AgentStatus - -func (as AgentStatusSlice) config(cfg config) { - for _i := range as { - as[_i].config = cfg - } -} diff --git a/ent/agentstatus/agentstatus.go b/ent/agentstatus/agentstatus.go index cbba8683..d0922e0a 100755 --- a/ent/agentstatus/agentstatus.go +++ b/ent/agentstatus/agentstatus.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package agentstatus import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -116,3 +118,123 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the AgentStatus queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByClientID orders the results by the ClientID field. +func ByClientID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClientID, opts...).ToFunc() +} + +// ByHostname orders the results by the Hostname field. +func ByHostname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHostname, opts...).ToFunc() +} + +// ByUpTime orders the results by the UpTime field. +func ByUpTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpTime, opts...).ToFunc() +} + +// ByBootTime orders the results by the BootTime field. +func ByBootTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBootTime, opts...).ToFunc() +} + +// ByNumProcs orders the results by the NumProcs field. +func ByNumProcs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNumProcs, opts...).ToFunc() +} + +// ByOs orders the results by the Os field. +func ByOs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOs, opts...).ToFunc() +} + +// ByHostID orders the results by the HostID field. +func ByHostID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHostID, opts...).ToFunc() +} + +// ByLoad1 orders the results by the Load1 field. +func ByLoad1(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLoad1, opts...).ToFunc() +} + +// ByLoad5 orders the results by the Load5 field. +func ByLoad5(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLoad5, opts...).ToFunc() +} + +// ByLoad15 orders the results by the Load15 field. +func ByLoad15(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLoad15, opts...).ToFunc() +} + +// ByTotalMem orders the results by the TotalMem field. +func ByTotalMem(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotalMem, opts...).ToFunc() +} + +// ByFreeMem orders the results by the FreeMem field. +func ByFreeMem(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFreeMem, opts...).ToFunc() +} + +// ByUsedMem orders the results by the UsedMem field. +func ByUsedMem(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedMem, opts...).ToFunc() +} + +// ByTimestamp orders the results by the Timestamp field. +func ByTimestamp(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTimestamp, opts...).ToFunc() +} + +// ByAgentStatusToProvisionedHostField orders the results by AgentStatusToProvisionedHost field. +func ByAgentStatusToProvisionedHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAgentStatusToProvisionedHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAgentStatusToProvisionedNetworkField orders the results by AgentStatusToProvisionedNetwork field. +func ByAgentStatusToProvisionedNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAgentStatusToProvisionedNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAgentStatusToBuildField orders the results by AgentStatusToBuild field. 
+func ByAgentStatusToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAgentStatusToBuildStep(), sql.OrderByField(field, opts...)) + } +} +func newAgentStatusToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AgentStatusToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToProvisionedHostTable, AgentStatusToProvisionedHostColumn), + ) +} +func newAgentStatusToProvisionedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AgentStatusToProvisionedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToProvisionedNetworkTable, AgentStatusToProvisionedNetworkColumn), + ) +} +func newAgentStatusToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AgentStatusToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToBuildTable, AgentStatusToBuildColumn), + ) +} diff --git a/ent/agentstatus/where.go b/ent/agentstatus/where.go index 28ee9b7a..61493ed5 100755 --- a/ent/agentstatus/where.go +++ b/ent/agentstatus/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package agentstatus @@ -11,1387 +11,777 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. 
func IDGT(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldID, id)) } // ClientID applies equality check predicate on the "ClientID" field. It's identical to ClientIDEQ. func ClientID(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldClientID, v)) } // Hostname applies equality check predicate on the "Hostname" field. It's identical to HostnameEQ. func Hostname(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldHostname, v)) } // UpTime applies equality check predicate on the "UpTime" field. It's identical to UpTimeEQ. func UpTime(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldUpTime, v)) } // BootTime applies equality check predicate on the "BootTime" field. It's identical to BootTimeEQ. func BootTime(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldBootTime, v)) } // NumProcs applies equality check predicate on the "NumProcs" field. It's identical to NumProcsEQ. func NumProcs(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldNumProcs, v)) } // Os applies equality check predicate on the "Os" field. It's identical to OsEQ. func Os(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldOs, v)) } // HostID applies equality check predicate on the "HostID" field. It's identical to HostIDEQ. func HostID(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldHostID, v)) } // Load1 applies equality check predicate on the "Load1" field. It's identical to Load1EQ. func Load1(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldLoad1, v)) } // Load5 applies equality check predicate on the "Load5" field. It's identical to Load5EQ. 
func Load5(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldLoad5, v)) } // Load15 applies equality check predicate on the "Load15" field. It's identical to Load15EQ. func Load15(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldLoad15, v)) } // TotalMem applies equality check predicate on the "TotalMem" field. It's identical to TotalMemEQ. func TotalMem(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldTotalMem, v)) } // FreeMem applies equality check predicate on the "FreeMem" field. It's identical to FreeMemEQ. func FreeMem(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldFreeMem, v)) } // UsedMem applies equality check predicate on the "UsedMem" field. It's identical to UsedMemEQ. func UsedMem(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldUsedMem, v)) } // Timestamp applies equality check predicate on the "Timestamp" field. It's identical to TimestampEQ. func Timestamp(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldTimestamp, v)) } // ClientIDEQ applies the EQ predicate on the "ClientID" field. func ClientIDEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldClientID, v)) } // ClientIDNEQ applies the NEQ predicate on the "ClientID" field. func ClientIDNEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldClientID, v)) } // ClientIDIn applies the In predicate on the "ClientID" field. func ClientIDIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldClientID), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldClientID, vs...)) } // ClientIDNotIn applies the NotIn predicate on the "ClientID" field. func ClientIDNotIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldClientID), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldClientID, vs...)) } // ClientIDGT applies the GT predicate on the "ClientID" field. 
func ClientIDGT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldClientID, v)) } // ClientIDGTE applies the GTE predicate on the "ClientID" field. func ClientIDGTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldClientID, v)) } // ClientIDLT applies the LT predicate on the "ClientID" field. func ClientIDLT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldClientID, v)) } // ClientIDLTE applies the LTE predicate on the "ClientID" field. func ClientIDLTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldClientID, v)) } // ClientIDContains applies the Contains predicate on the "ClientID" field. func ClientIDContains(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldContains(FieldClientID, v)) } // ClientIDHasPrefix applies the HasPrefix predicate on the "ClientID" field. func ClientIDHasPrefix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldHasPrefix(FieldClientID, v)) } // ClientIDHasSuffix applies the HasSuffix predicate on the "ClientID" field. func ClientIDHasSuffix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldHasSuffix(FieldClientID, v)) } // ClientIDEqualFold applies the EqualFold predicate on the "ClientID" field. func ClientIDEqualFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldEqualFold(FieldClientID, v)) } // ClientIDContainsFold applies the ContainsFold predicate on the "ClientID" field. func ClientIDContainsFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldClientID), v)) - }) + return predicate.AgentStatus(sql.FieldContainsFold(FieldClientID, v)) } // HostnameEQ applies the EQ predicate on the "Hostname" field. func HostnameEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldHostname, v)) } // HostnameNEQ applies the NEQ predicate on the "Hostname" field. func HostnameNEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldHostname, v)) } // HostnameIn applies the In predicate on the "Hostname" field. func HostnameIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". 
This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHostname), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldHostname, vs...)) } // HostnameNotIn applies the NotIn predicate on the "Hostname" field. func HostnameNotIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHostname), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldHostname, vs...)) } // HostnameGT applies the GT predicate on the "Hostname" field. func HostnameGT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldHostname, v)) } // HostnameGTE applies the GTE predicate on the "Hostname" field. func HostnameGTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldHostname, v)) } // HostnameLT applies the LT predicate on the "Hostname" field. func HostnameLT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldHostname, v)) } // HostnameLTE applies the LTE predicate on the "Hostname" field. func HostnameLTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldHostname, v)) } // HostnameContains applies the Contains predicate on the "Hostname" field. func HostnameContains(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldContains(FieldHostname, v)) } // HostnameHasPrefix applies the HasPrefix predicate on the "Hostname" field. func HostnameHasPrefix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldHasPrefix(FieldHostname, v)) } // HostnameHasSuffix applies the HasSuffix predicate on the "Hostname" field. func HostnameHasSuffix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldHasSuffix(FieldHostname, v)) } // HostnameEqualFold applies the EqualFold predicate on the "Hostname" field. func HostnameEqualFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldEqualFold(FieldHostname, v)) } // HostnameContainsFold applies the ContainsFold predicate on the "Hostname" field. func HostnameContainsFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHostname), v)) - }) + return predicate.AgentStatus(sql.FieldContainsFold(FieldHostname, v)) } // UpTimeEQ applies the EQ predicate on the "UpTime" field. 
func UpTimeEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldUpTime, v)) } // UpTimeNEQ applies the NEQ predicate on the "UpTime" field. func UpTimeNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldUpTime, v)) } // UpTimeIn applies the In predicate on the "UpTime" field. func UpTimeIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldUpTime), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldUpTime, vs...)) } // UpTimeNotIn applies the NotIn predicate on the "UpTime" field. func UpTimeNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldUpTime), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldUpTime, vs...)) } // UpTimeGT applies the GT predicate on the "UpTime" field. func UpTimeGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldUpTime, v)) } // UpTimeGTE applies the GTE predicate on the "UpTime" field. func UpTimeGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldUpTime, v)) } // UpTimeLT applies the LT predicate on the "UpTime" field. func UpTimeLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldUpTime, v)) } // UpTimeLTE applies the LTE predicate on the "UpTime" field. func UpTimeLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpTime), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldUpTime, v)) } // BootTimeEQ applies the EQ predicate on the "BootTime" field. func BootTimeEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldBootTime, v)) } // BootTimeNEQ applies the NEQ predicate on the "BootTime" field. func BootTimeNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldBootTime, v)) } // BootTimeIn applies the In predicate on the "BootTime" field. 
func BootTimeIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldBootTime), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldBootTime, vs...)) } // BootTimeNotIn applies the NotIn predicate on the "BootTime" field. func BootTimeNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldBootTime), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldBootTime, vs...)) } // BootTimeGT applies the GT predicate on the "BootTime" field. func BootTimeGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldBootTime, v)) } // BootTimeGTE applies the GTE predicate on the "BootTime" field. func BootTimeGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldBootTime, v)) } // BootTimeLT applies the LT predicate on the "BootTime" field. func BootTimeLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldBootTime, v)) } // BootTimeLTE applies the LTE predicate on the "BootTime" field. func BootTimeLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldBootTime), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldBootTime, v)) } // NumProcsEQ applies the EQ predicate on the "NumProcs" field. func NumProcsEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldNumProcs, v)) } // NumProcsNEQ applies the NEQ predicate on the "NumProcs" field. func NumProcsNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldNumProcs, v)) } // NumProcsIn applies the In predicate on the "NumProcs" field. func NumProcsIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldNumProcs), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldNumProcs, vs...)) } // NumProcsNotIn applies the NotIn predicate on the "NumProcs" field. 
func NumProcsNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldNumProcs), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldNumProcs, vs...)) } // NumProcsGT applies the GT predicate on the "NumProcs" field. func NumProcsGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldNumProcs, v)) } // NumProcsGTE applies the GTE predicate on the "NumProcs" field. func NumProcsGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldNumProcs, v)) } // NumProcsLT applies the LT predicate on the "NumProcs" field. func NumProcsLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldNumProcs, v)) } // NumProcsLTE applies the LTE predicate on the "NumProcs" field. func NumProcsLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldNumProcs), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldNumProcs, v)) } // OsEQ applies the EQ predicate on the "Os" field. func OsEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldOs, v)) } // OsNEQ applies the NEQ predicate on the "Os" field. func OsNEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldOs, v)) } // OsIn applies the In predicate on the "Os" field. func OsIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldOs), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldOs, vs...)) } // OsNotIn applies the NotIn predicate on the "Os" field. func OsNotIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldOs), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldOs, vs...)) } // OsGT applies the GT predicate on the "Os" field. func OsGT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldOs, v)) } // OsGTE applies the GTE predicate on the "Os" field. 
func OsGTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldOs, v)) } // OsLT applies the LT predicate on the "Os" field. func OsLT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldOs, v)) } // OsLTE applies the LTE predicate on the "Os" field. func OsLTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldOs, v)) } // OsContains applies the Contains predicate on the "Os" field. func OsContains(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldContains(FieldOs, v)) } // OsHasPrefix applies the HasPrefix predicate on the "Os" field. func OsHasPrefix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldHasPrefix(FieldOs, v)) } // OsHasSuffix applies the HasSuffix predicate on the "Os" field. func OsHasSuffix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldHasSuffix(FieldOs, v)) } // OsEqualFold applies the EqualFold predicate on the "Os" field. func OsEqualFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldEqualFold(FieldOs, v)) } // OsContainsFold applies the ContainsFold predicate on the "Os" field. func OsContainsFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOs), v)) - }) + return predicate.AgentStatus(sql.FieldContainsFold(FieldOs, v)) } // HostIDEQ applies the EQ predicate on the "HostID" field. func HostIDEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldHostID, v)) } // HostIDNEQ applies the NEQ predicate on the "HostID" field. func HostIDNEQ(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldHostID, v)) } // HostIDIn applies the In predicate on the "HostID" field. func HostIDIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHostID), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldHostID, vs...)) } // HostIDNotIn applies the NotIn predicate on the "HostID" field. 
func HostIDNotIn(vs ...string) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHostID), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldHostID, vs...)) } // HostIDGT applies the GT predicate on the "HostID" field. func HostIDGT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldHostID, v)) } // HostIDGTE applies the GTE predicate on the "HostID" field. func HostIDGTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldHostID, v)) } // HostIDLT applies the LT predicate on the "HostID" field. func HostIDLT(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldHostID, v)) } // HostIDLTE applies the LTE predicate on the "HostID" field. func HostIDLTE(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldHostID, v)) } // HostIDContains applies the Contains predicate on the "HostID" field. func HostIDContains(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldContains(FieldHostID, v)) } // HostIDHasPrefix applies the HasPrefix predicate on the "HostID" field. func HostIDHasPrefix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldHasPrefix(FieldHostID, v)) } // HostIDHasSuffix applies the HasSuffix predicate on the "HostID" field. func HostIDHasSuffix(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldHasSuffix(FieldHostID, v)) } // HostIDEqualFold applies the EqualFold predicate on the "HostID" field. func HostIDEqualFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldEqualFold(FieldHostID, v)) } // HostIDContainsFold applies the ContainsFold predicate on the "HostID" field. func HostIDContainsFold(v string) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHostID), v)) - }) + return predicate.AgentStatus(sql.FieldContainsFold(FieldHostID, v)) } // Load1EQ applies the EQ predicate on the "Load1" field. func Load1EQ(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldLoad1, v)) } // Load1NEQ applies the NEQ predicate on the "Load1" field. 
func Load1NEQ(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldLoad1, v)) } // Load1In applies the In predicate on the "Load1" field. func Load1In(vs ...float64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLoad1), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldLoad1, vs...)) } // Load1NotIn applies the NotIn predicate on the "Load1" field. func Load1NotIn(vs ...float64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLoad1), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldLoad1, vs...)) } // Load1GT applies the GT predicate on the "Load1" field. func Load1GT(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldLoad1, v)) } // Load1GTE applies the GTE predicate on the "Load1" field. func Load1GTE(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldLoad1, v)) } // Load1LT applies the LT predicate on the "Load1" field. func Load1LT(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldLoad1, v)) } // Load1LTE applies the LTE predicate on the "Load1" field. func Load1LTE(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLoad1), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldLoad1, v)) } // Load5EQ applies the EQ predicate on the "Load5" field. func Load5EQ(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldLoad5, v)) } // Load5NEQ applies the NEQ predicate on the "Load5" field. func Load5NEQ(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldLoad5, v)) } // Load5In applies the In predicate on the "Load5" field. func Load5In(vs ...float64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLoad5), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldLoad5, vs...)) } // Load5NotIn applies the NotIn predicate on the "Load5" field. 
func Load5NotIn(vs ...float64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLoad5), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldLoad5, vs...)) } // Load5GT applies the GT predicate on the "Load5" field. func Load5GT(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldLoad5, v)) } // Load5GTE applies the GTE predicate on the "Load5" field. func Load5GTE(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldLoad5, v)) } // Load5LT applies the LT predicate on the "Load5" field. func Load5LT(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldLoad5, v)) } // Load5LTE applies the LTE predicate on the "Load5" field. func Load5LTE(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLoad5), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldLoad5, v)) } // Load15EQ applies the EQ predicate on the "Load15" field. func Load15EQ(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldLoad15, v)) } // Load15NEQ applies the NEQ predicate on the "Load15" field. func Load15NEQ(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldLoad15, v)) } // Load15In applies the In predicate on the "Load15" field. func Load15In(vs ...float64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLoad15), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldLoad15, vs...)) } // Load15NotIn applies the NotIn predicate on the "Load15" field. func Load15NotIn(vs ...float64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLoad15), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldLoad15, vs...)) } // Load15GT applies the GT predicate on the "Load15" field. func Load15GT(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldLoad15, v)) } // Load15GTE applies the GTE predicate on the "Load15" field. 
func Load15GTE(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldLoad15, v)) } // Load15LT applies the LT predicate on the "Load15" field. func Load15LT(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldLoad15, v)) } // Load15LTE applies the LTE predicate on the "Load15" field. func Load15LTE(v float64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLoad15), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldLoad15, v)) } // TotalMemEQ applies the EQ predicate on the "TotalMem" field. func TotalMemEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldTotalMem, v)) } // TotalMemNEQ applies the NEQ predicate on the "TotalMem" field. func TotalMemNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldTotalMem, v)) } // TotalMemIn applies the In predicate on the "TotalMem" field. func TotalMemIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTotalMem), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldTotalMem, vs...)) } // TotalMemNotIn applies the NotIn predicate on the "TotalMem" field. func TotalMemNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTotalMem), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldTotalMem, vs...)) } // TotalMemGT applies the GT predicate on the "TotalMem" field. func TotalMemGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldTotalMem, v)) } // TotalMemGTE applies the GTE predicate on the "TotalMem" field. func TotalMemGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldTotalMem, v)) } // TotalMemLT applies the LT predicate on the "TotalMem" field. func TotalMemLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldTotalMem, v)) } // TotalMemLTE applies the LTE predicate on the "TotalMem" field. 
func TotalMemLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTotalMem), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldTotalMem, v)) } // FreeMemEQ applies the EQ predicate on the "FreeMem" field. func FreeMemEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldFreeMem, v)) } // FreeMemNEQ applies the NEQ predicate on the "FreeMem" field. func FreeMemNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldFreeMem, v)) } // FreeMemIn applies the In predicate on the "FreeMem" field. func FreeMemIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldFreeMem), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldFreeMem, vs...)) } // FreeMemNotIn applies the NotIn predicate on the "FreeMem" field. func FreeMemNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldFreeMem), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldFreeMem, vs...)) } // FreeMemGT applies the GT predicate on the "FreeMem" field. func FreeMemGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldFreeMem, v)) } // FreeMemGTE applies the GTE predicate on the "FreeMem" field. func FreeMemGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldFreeMem, v)) } // FreeMemLT applies the LT predicate on the "FreeMem" field. func FreeMemLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldFreeMem, v)) } // FreeMemLTE applies the LTE predicate on the "FreeMem" field. func FreeMemLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldFreeMem), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldFreeMem, v)) } // UsedMemEQ applies the EQ predicate on the "UsedMem" field. func UsedMemEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldUsedMem, v)) } // UsedMemNEQ applies the NEQ predicate on the "UsedMem" field. 
func UsedMemNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldUsedMem, v)) } // UsedMemIn applies the In predicate on the "UsedMem" field. func UsedMemIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldUsedMem), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldUsedMem, vs...)) } // UsedMemNotIn applies the NotIn predicate on the "UsedMem" field. func UsedMemNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldUsedMem), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldUsedMem, vs...)) } // UsedMemGT applies the GT predicate on the "UsedMem" field. func UsedMemGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldUsedMem, v)) } // UsedMemGTE applies the GTE predicate on the "UsedMem" field. func UsedMemGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldUsedMem, v)) } // UsedMemLT applies the LT predicate on the "UsedMem" field. func UsedMemLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldUsedMem, v)) } // UsedMemLTE applies the LTE predicate on the "UsedMem" field. func UsedMemLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUsedMem), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldUsedMem, v)) } // TimestampEQ applies the EQ predicate on the "Timestamp" field. func TimestampEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldEQ(FieldTimestamp, v)) } // TimestampNEQ applies the NEQ predicate on the "Timestamp" field. func TimestampNEQ(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldNEQ(FieldTimestamp, v)) } // TimestampIn applies the In predicate on the "Timestamp" field. func TimestampIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTimestamp), v...)) - }) + return predicate.AgentStatus(sql.FieldIn(FieldTimestamp, vs...)) } // TimestampNotIn applies the NotIn predicate on the "Timestamp" field. func TimestampNotIn(vs ...int64) predicate.AgentStatus { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentStatus(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTimestamp), v...)) - }) + return predicate.AgentStatus(sql.FieldNotIn(FieldTimestamp, vs...)) } // TimestampGT applies the GT predicate on the "Timestamp" field. func TimestampGT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldGT(FieldTimestamp, v)) } // TimestampGTE applies the GTE predicate on the "Timestamp" field. func TimestampGTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldGTE(FieldTimestamp, v)) } // TimestampLT applies the LT predicate on the "Timestamp" field. func TimestampLT(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldLT(FieldTimestamp, v)) } // TimestampLTE applies the LTE predicate on the "Timestamp" field. func TimestampLTE(v int64) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTimestamp), v)) - }) + return predicate.AgentStatus(sql.FieldLTE(FieldTimestamp, v)) } // HasAgentStatusToProvisionedHost applies the HasEdge predicate on the "AgentStatusToProvisionedHost" edge. @@ -1399,7 +789,6 @@ func HasAgentStatusToProvisionedHost() predicate.AgentStatus { return predicate.AgentStatus(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentStatusToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToProvisionedHostTable, AgentStatusToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1409,11 +798,7 @@ func HasAgentStatusToProvisionedHost() predicate.AgentStatus { // HasAgentStatusToProvisionedHostWith applies the HasEdge predicate on the "AgentStatusToProvisionedHost" edge with a given conditions (other predicates). 
func HasAgentStatusToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.AgentStatus { return predicate.AgentStatus(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentStatusToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToProvisionedHostTable, AgentStatusToProvisionedHostColumn), - ) + step := newAgentStatusToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1427,7 +812,6 @@ func HasAgentStatusToProvisionedNetwork() predicate.AgentStatus { return predicate.AgentStatus(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentStatusToProvisionedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToProvisionedNetworkTable, AgentStatusToProvisionedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1437,11 +821,7 @@ func HasAgentStatusToProvisionedNetwork() predicate.AgentStatus { // HasAgentStatusToProvisionedNetworkWith applies the HasEdge predicate on the "AgentStatusToProvisionedNetwork" edge with a given conditions (other predicates). func HasAgentStatusToProvisionedNetworkWith(preds ...predicate.ProvisionedNetwork) predicate.AgentStatus { return predicate.AgentStatus(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentStatusToProvisionedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToProvisionedNetworkTable, AgentStatusToProvisionedNetworkColumn), - ) + step := newAgentStatusToProvisionedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1455,7 +835,6 @@ func HasAgentStatusToBuild() predicate.AgentStatus { return predicate.AgentStatus(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentStatusToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToBuildTable, AgentStatusToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1465,11 +844,7 @@ func HasAgentStatusToBuild() predicate.AgentStatus { // HasAgentStatusToBuildWith applies the HasEdge predicate on the "AgentStatusToBuild" edge with a given conditions (other predicates). func HasAgentStatusToBuildWith(preds ...predicate.Build) predicate.AgentStatus { return predicate.AgentStatus(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentStatusToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AgentStatusToBuildTable, AgentStatusToBuildColumn), - ) + step := newAgentStatusToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1480,32 +855,15 @@ func HasAgentStatusToBuildWith(preds ...predicate.Build) predicate.AgentStatus { // And groups predicates with the AND operator between them. func And(predicates ...predicate.AgentStatus) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AgentStatus(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.AgentStatus) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AgentStatus(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.AgentStatus) predicate.AgentStatus { - return predicate.AgentStatus(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.AgentStatus(sql.NotPredicates(p)) } diff --git a/ent/agentstatus_create.go b/ent/agentstatus_create.go index ed322881..446b3bf8 100755 --- a/ent/agentstatus_create.go +++ b/ent/agentstatus_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -185,44 +185,8 @@ func (asc *AgentStatusCreate) Mutation() *AgentStatusMutation { // Save creates the AgentStatus in the database. func (asc *AgentStatusCreate) Save(ctx context.Context) (*AgentStatus, error) { - var ( - err error - node *AgentStatus - ) asc.defaults() - if len(asc.hooks) == 0 { - if err = asc.check(); err != nil { - return nil, err - } - node, err = asc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentStatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = asc.check(); err != nil { - return nil, err - } - asc.mutation = mutation - if node, err = asc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(asc.hooks) - 1; i >= 0; i-- { - if asc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = asc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, asc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, asc.sqlSave, asc.mutation, asc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -303,10 +267,13 @@ func (asc *AgentStatusCreate) check() error { } func (asc *AgentStatusCreate) sqlSave(ctx context.Context) (*AgentStatus, error) { + if err := asc.check(); err != nil { + return nil, err + } _node, _spec := asc.createSpec() if err := sqlgraph.CreateNode(ctx, asc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -317,134 +284,74 @@ func (asc *AgentStatusCreate) sqlSave(ctx context.Context) (*AgentStatus, error) return nil, err } } + asc.mutation.id = &_node.ID + asc.mutation.done = true return _node, nil } func (asc *AgentStatusCreate) createSpec() (*AgentStatus, *sqlgraph.CreateSpec) { var ( _node = &AgentStatus{config: asc.config} - _spec = &sqlgraph.CreateSpec{ - Table: agentstatus.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(agentstatus.Table, sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID)) ) if id, ok := asc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := asc.mutation.ClientID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldClientID, - }) + _spec.SetField(agentstatus.FieldClientID, field.TypeString, value) _node.ClientID = value } if value, ok := asc.mutation.Hostname(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldHostname, - }) + _spec.SetField(agentstatus.FieldHostname, field.TypeString, value) _node.Hostname = value } if value, ok := asc.mutation.UpTime(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUpTime, - }) + _spec.SetField(agentstatus.FieldUpTime, field.TypeInt64, value) _node.UpTime = value } if value, ok := asc.mutation.BootTime(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldBootTime, - }) + _spec.SetField(agentstatus.FieldBootTime, field.TypeInt64, value) _node.BootTime = value } if value, ok := asc.mutation.NumProcs(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldNumProcs, - }) + _spec.SetField(agentstatus.FieldNumProcs, field.TypeInt64, value) _node.NumProcs = value } if value, ok := asc.mutation.Os(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldOs, - }) + _spec.SetField(agentstatus.FieldOs, field.TypeString, value) _node.Os = value } if value, ok := asc.mutation.HostID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldHostID, - }) + _spec.SetField(agentstatus.FieldHostID, field.TypeString, value) _node.HostID = value } if value, ok := asc.mutation.Load1(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad1, - }) + _spec.SetField(agentstatus.FieldLoad1, field.TypeFloat64, value) _node.Load1 = value } if value, ok := asc.mutation.Load5(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad5, - }) + 
_spec.SetField(agentstatus.FieldLoad5, field.TypeFloat64, value) _node.Load5 = value } if value, ok := asc.mutation.Load15(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad15, - }) + _spec.SetField(agentstatus.FieldLoad15, field.TypeFloat64, value) _node.Load15 = value } if value, ok := asc.mutation.TotalMem(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTotalMem, - }) + _spec.SetField(agentstatus.FieldTotalMem, field.TypeInt64, value) _node.TotalMem = value } if value, ok := asc.mutation.FreeMem(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldFreeMem, - }) + _spec.SetField(agentstatus.FieldFreeMem, field.TypeInt64, value) _node.FreeMem = value } if value, ok := asc.mutation.UsedMem(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUsedMem, - }) + _spec.SetField(agentstatus.FieldUsedMem, field.TypeInt64, value) _node.UsedMem = value } if value, ok := asc.mutation.Timestamp(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTimestamp, - }) + _spec.SetField(agentstatus.FieldTimestamp, field.TypeInt64, value) _node.Timestamp = value } if nodes := asc.mutation.AgentStatusToProvisionedHostIDs(); len(nodes) > 0 { @@ -455,10 +362,7 @@ func (asc *AgentStatusCreate) createSpec() (*AgentStatus, *sqlgraph.CreateSpec) Columns: []string{agentstatus.AgentStatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -475,10 +379,7 @@ func (asc *AgentStatusCreate) createSpec() (*AgentStatus, *sqlgraph.CreateSpec) Columns: []string{agentstatus.AgentStatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -495,10 +396,7 @@ func (asc *AgentStatusCreate) createSpec() (*AgentStatus, *sqlgraph.CreateSpec) Columns: []string{agentstatus.AgentStatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -513,11 +411,15 @@ func (asc *AgentStatusCreate) createSpec() (*AgentStatus, *sqlgraph.CreateSpec) // AgentStatusCreateBulk is the builder for creating many AgentStatus entities in bulk. type AgentStatusCreateBulk struct { config + err error builders []*AgentStatusCreate } // Save creates the AgentStatus entities in the database. 
func (ascb *AgentStatusCreateBulk) Save(ctx context.Context) ([]*AgentStatus, error) { + if ascb.err != nil { + return nil, ascb.err + } specs := make([]*sqlgraph.CreateSpec, len(ascb.builders)) nodes := make([]*AgentStatus, len(ascb.builders)) mutators := make([]Mutator, len(ascb.builders)) @@ -534,8 +436,8 @@ func (ascb *AgentStatusCreateBulk) Save(ctx context.Context) ([]*AgentStatus, er return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ascb.builders[i+1].mutation) } else { @@ -543,7 +445,7 @@ func (ascb *AgentStatusCreateBulk) Save(ctx context.Context) ([]*AgentStatus, er // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, ascb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/agentstatus_delete.go b/ent/agentstatus_delete.go index e8083d0f..42bc34c1 100755 --- a/ent/agentstatus_delete.go +++ b/ent/agentstatus_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (asd *AgentStatusDelete) Where(ps ...predicate.AgentStatus) *AgentStatusDel // Exec executes the deletion query and returns how many vertices were deleted. func (asd *AgentStatusDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(asd.hooks) == 0 { - affected, err = asd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentStatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - asd.mutation = mutation - affected, err = asd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(asd.hooks) - 1; i >= 0; i-- { - if asd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = asd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, asd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, asd.sqlExec, asd.mutation, asd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (asd *AgentStatusDelete) ExecX(ctx context.Context) int { } func (asd *AgentStatusDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: agentstatus.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(agentstatus.Table, sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID)) if ps := asd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (asd *AgentStatusDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, asd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, asd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + asd.mutation.done = true + return affected, err } // AgentStatusDeleteOne is the builder for deleting a single AgentStatus entity. 
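In the same spirit, a small sketch of the reworked delete path (again not part of the patch): the client variable, hostname value, and package name are hypothetical, and the HostnameEQ predicate assumes ent's usual generated field-predicate naming rather than anything shown in this diff:

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/agentstatus"
)

// deleteAgentStatusesForHost deletes all AgentStatus rows for one hostname.
// Exec now runs through withHooks(ctx, asd.sqlExec, asd.mutation, asd.hooks),
// sqlExec builds its spec via sqlgraph.NewDeleteSpec, and constraint violations
// come back wrapped as *ent.ConstraintError instead of the raw driver error.
func deleteAgentStatusesForHost(ctx context.Context, client *ent.Client, hostname string) (int, error) {
	return client.AgentStatus.
		Delete().
		Where(agentstatus.HostnameEQ(hostname)). // assumed generated predicate
		Exec(ctx)
}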
@@ -92,6 +61,12 @@ type AgentStatusDeleteOne struct { asd *AgentStatusDelete } +// Where appends a list predicates to the AgentStatusDelete builder. +func (asdo *AgentStatusDeleteOne) Where(ps ...predicate.AgentStatus) *AgentStatusDeleteOne { + asdo.asd.mutation.Where(ps...) + return asdo +} + // Exec executes the deletion query. func (asdo *AgentStatusDeleteOne) Exec(ctx context.Context) error { n, err := asdo.asd.Exec(ctx) @@ -107,5 +82,7 @@ func (asdo *AgentStatusDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (asdo *AgentStatusDeleteOne) ExecX(ctx context.Context) { - asdo.asd.ExecX(ctx) + if err := asdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/agentstatus_query.go b/ent/agentstatus_query.go index f67bb206..376faddb 100755 --- a/ent/agentstatus_query.go +++ b/ent/agentstatus_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -22,17 +21,16 @@ import ( // AgentStatusQuery is the builder for querying AgentStatus entities. type AgentStatusQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.AgentStatus - // eager-loading edges. + ctx *QueryContext + order []agentstatus.OrderOption + inters []Interceptor + predicates []predicate.AgentStatus withAgentStatusToProvisionedHost *ProvisionedHostQuery withAgentStatusToProvisionedNetwork *ProvisionedNetworkQuery withAgentStatusToBuild *BuildQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*AgentStatus) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -44,34 +42,34 @@ func (asq *AgentStatusQuery) Where(ps ...predicate.AgentStatus) *AgentStatusQuer return asq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (asq *AgentStatusQuery) Limit(limit int) *AgentStatusQuery { - asq.limit = &limit + asq.ctx.Limit = &limit return asq } -// Offset adds an offset step to the query. +// Offset to start from. func (asq *AgentStatusQuery) Offset(offset int) *AgentStatusQuery { - asq.offset = &offset + asq.ctx.Offset = &offset return asq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (asq *AgentStatusQuery) Unique(unique bool) *AgentStatusQuery { - asq.unique = &unique + asq.ctx.Unique = &unique return asq } -// Order adds an order step to the query. -func (asq *AgentStatusQuery) Order(o ...OrderFunc) *AgentStatusQuery { +// Order specifies how the records should be ordered. +func (asq *AgentStatusQuery) Order(o ...agentstatus.OrderOption) *AgentStatusQuery { asq.order = append(asq.order, o...) return asq } // QueryAgentStatusToProvisionedHost chains the current query on the "AgentStatusToProvisionedHost" edge. 
func (asq *AgentStatusQuery) QueryAgentStatusToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: asq.config} + query := (&ProvisionedHostClient{config: asq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := asq.prepareQuery(ctx); err != nil { return nil, err @@ -93,7 +91,7 @@ func (asq *AgentStatusQuery) QueryAgentStatusToProvisionedHost() *ProvisionedHos // QueryAgentStatusToProvisionedNetwork chains the current query on the "AgentStatusToProvisionedNetwork" edge. func (asq *AgentStatusQuery) QueryAgentStatusToProvisionedNetwork() *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: asq.config} + query := (&ProvisionedNetworkClient{config: asq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := asq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +113,7 @@ func (asq *AgentStatusQuery) QueryAgentStatusToProvisionedNetwork() *Provisioned // QueryAgentStatusToBuild chains the current query on the "AgentStatusToBuild" edge. func (asq *AgentStatusQuery) QueryAgentStatusToBuild() *BuildQuery { - query := &BuildQuery{config: asq.config} + query := (&BuildClient{config: asq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := asq.prepareQuery(ctx); err != nil { return nil, err @@ -138,7 +136,7 @@ func (asq *AgentStatusQuery) QueryAgentStatusToBuild() *BuildQuery { // First returns the first AgentStatus entity from the query. // Returns a *NotFoundError when no AgentStatus was found. func (asq *AgentStatusQuery) First(ctx context.Context) (*AgentStatus, error) { - nodes, err := asq.Limit(1).All(ctx) + nodes, err := asq.Limit(1).All(setContextOp(ctx, asq.ctx, "First")) if err != nil { return nil, err } @@ -161,7 +159,7 @@ func (asq *AgentStatusQuery) FirstX(ctx context.Context) *AgentStatus { // Returns a *NotFoundError when no AgentStatus ID was found. func (asq *AgentStatusQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = asq.Limit(1).IDs(ctx); err != nil { + if ids, err = asq.Limit(1).IDs(setContextOp(ctx, asq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -184,7 +182,7 @@ func (asq *AgentStatusQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one AgentStatus entity is found. // Returns a *NotFoundError when no AgentStatus entities are found. func (asq *AgentStatusQuery) Only(ctx context.Context) (*AgentStatus, error) { - nodes, err := asq.Limit(2).All(ctx) + nodes, err := asq.Limit(2).All(setContextOp(ctx, asq.ctx, "Only")) if err != nil { return nil, err } @@ -212,7 +210,7 @@ func (asq *AgentStatusQuery) OnlyX(ctx context.Context) *AgentStatus { // Returns a *NotFoundError when no entities are found. func (asq *AgentStatusQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = asq.Limit(2).IDs(ctx); err != nil { + if ids, err = asq.Limit(2).IDs(setContextOp(ctx, asq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -237,10 +235,12 @@ func (asq *AgentStatusQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of AgentStatusSlice. 
func (asq *AgentStatusQuery) All(ctx context.Context) ([]*AgentStatus, error) { + ctx = setContextOp(ctx, asq.ctx, "All") if err := asq.prepareQuery(ctx); err != nil { return nil, err } - return asq.sqlAll(ctx) + qr := querierAll[[]*AgentStatus, *AgentStatusQuery]() + return withInterceptors[[]*AgentStatus](ctx, asq, qr, asq.inters) } // AllX is like All, but panics if an error occurs. @@ -253,9 +253,12 @@ func (asq *AgentStatusQuery) AllX(ctx context.Context) []*AgentStatus { } // IDs executes the query and returns a list of AgentStatus IDs. -func (asq *AgentStatusQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := asq.Select(agentstatus.FieldID).Scan(ctx, &ids); err != nil { +func (asq *AgentStatusQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if asq.ctx.Unique == nil && asq.path != nil { + asq.Unique(true) + } + ctx = setContextOp(ctx, asq.ctx, "IDs") + if err = asq.Select(agentstatus.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -272,10 +275,11 @@ func (asq *AgentStatusQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (asq *AgentStatusQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, asq.ctx, "Count") if err := asq.prepareQuery(ctx); err != nil { return 0, err } - return asq.sqlCount(ctx) + return withInterceptors[int](ctx, asq, querierCount[*AgentStatusQuery](), asq.inters) } // CountX is like Count, but panics if an error occurs. @@ -289,10 +293,15 @@ func (asq *AgentStatusQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (asq *AgentStatusQuery) Exist(ctx context.Context) (bool, error) { - if err := asq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, asq.ctx, "Exist") + switch _, err := asq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return asq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -312,24 +321,23 @@ func (asq *AgentStatusQuery) Clone() *AgentStatusQuery { } return &AgentStatusQuery{ config: asq.config, - limit: asq.limit, - offset: asq.offset, - order: append([]OrderFunc{}, asq.order...), + ctx: asq.ctx.Clone(), + order: append([]agentstatus.OrderOption{}, asq.order...), + inters: append([]Interceptor{}, asq.inters...), predicates: append([]predicate.AgentStatus{}, asq.predicates...), withAgentStatusToProvisionedHost: asq.withAgentStatusToProvisionedHost.Clone(), withAgentStatusToProvisionedNetwork: asq.withAgentStatusToProvisionedNetwork.Clone(), withAgentStatusToBuild: asq.withAgentStatusToBuild.Clone(), // clone intermediate query. - sql: asq.sql.Clone(), - path: asq.path, - unique: asq.unique, + sql: asq.sql.Clone(), + path: asq.path, } } // WithAgentStatusToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "AgentStatusToProvisionedHost" edge. The optional arguments are used to configure the query builder of the edge. 
func (asq *AgentStatusQuery) WithAgentStatusToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *AgentStatusQuery { - query := &ProvisionedHostQuery{config: asq.config} + query := (&ProvisionedHostClient{config: asq.config}).Query() for _, opt := range opts { opt(query) } @@ -340,7 +348,7 @@ func (asq *AgentStatusQuery) WithAgentStatusToProvisionedHost(opts ...func(*Prov // WithAgentStatusToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to // the "AgentStatusToProvisionedNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (asq *AgentStatusQuery) WithAgentStatusToProvisionedNetwork(opts ...func(*ProvisionedNetworkQuery)) *AgentStatusQuery { - query := &ProvisionedNetworkQuery{config: asq.config} + query := (&ProvisionedNetworkClient{config: asq.config}).Query() for _, opt := range opts { opt(query) } @@ -351,7 +359,7 @@ func (asq *AgentStatusQuery) WithAgentStatusToProvisionedNetwork(opts ...func(*P // WithAgentStatusToBuild tells the query-builder to eager-load the nodes that are connected to // the "AgentStatusToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (asq *AgentStatusQuery) WithAgentStatusToBuild(opts ...func(*BuildQuery)) *AgentStatusQuery { - query := &BuildQuery{config: asq.config} + query := (&BuildClient{config: asq.config}).Query() for _, opt := range opts { opt(query) } @@ -373,17 +381,13 @@ func (asq *AgentStatusQuery) WithAgentStatusToBuild(opts ...func(*BuildQuery)) * // GroupBy(agentstatus.FieldClientID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (asq *AgentStatusQuery) GroupBy(field string, fields ...string) *AgentStatusGroupBy { - group := &AgentStatusGroupBy{config: asq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := asq.prepareQuery(ctx); err != nil { - return nil, err - } - return asq.sqlQuery(ctx), nil - } - return group + asq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AgentStatusGroupBy{build: asq} + grbuild.flds = &asq.ctx.Fields + grbuild.label = agentstatus.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -398,14 +402,31 @@ func (asq *AgentStatusQuery) GroupBy(field string, fields ...string) *AgentStatu // client.AgentStatus.Query(). // Select(agentstatus.FieldClientID). // Scan(ctx, &v) -// func (asq *AgentStatusQuery) Select(fields ...string) *AgentStatusSelect { - asq.fields = append(asq.fields, fields...) - return &AgentStatusSelect{AgentStatusQuery: asq} + asq.ctx.Fields = append(asq.ctx.Fields, fields...) + sbuild := &AgentStatusSelect{AgentStatusQuery: asq} + sbuild.label = agentstatus.Label + sbuild.flds, sbuild.scan = &asq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AgentStatusSelect configured with the given aggregations. +func (asq *AgentStatusQuery) Aggregate(fns ...AggregateFunc) *AgentStatusSelect { + return asq.Select().Aggregate(fns...) 
} func (asq *AgentStatusQuery) prepareQuery(ctx context.Context) error { - for _, f := range asq.fields { + for _, inter := range asq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, asq); err != nil { + return err + } + } + } + for _, f := range asq.ctx.Fields { if !agentstatus.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -420,7 +441,7 @@ func (asq *AgentStatusQuery) prepareQuery(ctx context.Context) error { return nil } -func (asq *AgentStatusQuery) sqlAll(ctx context.Context) ([]*AgentStatus, error) { +func (asq *AgentStatusQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AgentStatus, error) { var ( nodes = []*AgentStatus{} withFKs = asq.withFKs @@ -437,150 +458,171 @@ func (asq *AgentStatusQuery) sqlAll(ctx context.Context) ([]*AgentStatus, error) if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, agentstatus.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AgentStatus).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &AgentStatus{config: asq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(asq.modifiers) > 0 { + _spec.Modifiers = asq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, asq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := asq.withAgentStatusToProvisionedHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AgentStatus) - for i := range nodes { - if nodes[i].agent_status_agent_status_to_provisioned_host == nil { - continue - } - fk := *nodes[i].agent_status_agent_status_to_provisioned_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := asq.loadAgentStatusToProvisionedHost(ctx, query, nodes, nil, + func(n *AgentStatus, e *ProvisionedHost) { n.Edges.AgentStatusToProvisionedHost = e }); err != nil { + return nil, err } - query.Where(provisionedhost.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := asq.withAgentStatusToProvisionedNetwork; query != nil { + if err := asq.loadAgentStatusToProvisionedNetwork(ctx, query, nodes, nil, + func(n *AgentStatus, e *ProvisionedNetwork) { n.Edges.AgentStatusToProvisionedNetwork = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_provisioned_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AgentStatusToProvisionedHost = n - } + } + if query := asq.withAgentStatusToBuild; query != nil { + if err := asq.loadAgentStatusToBuild(ctx, query, nodes, nil, + func(n *AgentStatus, e *Build) { n.Edges.AgentStatusToBuild = e }); err != nil { + return nil, err + } + } + for i := range asq.loadTotal { + if err := asq.loadTotal[i](ctx, nodes); err != nil { + return 
nil, err } } + return nodes, nil +} - if query := asq.withAgentStatusToProvisionedNetwork; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AgentStatus) +func (asq *AgentStatusQuery) loadAgentStatusToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*AgentStatus, init func(*AgentStatus), assign func(*AgentStatus, *ProvisionedHost)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AgentStatus) + for i := range nodes { + if nodes[i].agent_status_agent_status_to_provisioned_host == nil { + continue + } + fk := *nodes[i].agent_status_agent_status_to_provisioned_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(provisionedhost.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_provisioned_host" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].agent_status_agent_status_to_provisioned_network == nil { - continue - } - fk := *nodes[i].agent_status_agent_status_to_provisioned_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(provisionednetwork.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (asq *AgentStatusQuery) loadAgentStatusToProvisionedNetwork(ctx context.Context, query *ProvisionedNetworkQuery, nodes []*AgentStatus, init func(*AgentStatus), assign func(*AgentStatus, *ProvisionedNetwork)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AgentStatus) + for i := range nodes { + if nodes[i].agent_status_agent_status_to_provisioned_network == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_provisioned_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AgentStatusToProvisionedNetwork = n - } + fk := *nodes[i].agent_status_agent_status_to_provisioned_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := asq.withAgentStatusToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AgentStatus) + if len(ids) == 0 { + return nil + } + query.Where(provisionednetwork.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_provisioned_network" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].agent_status_agent_status_to_build == nil { - continue - } - fk := *nodes[i].agent_status_agent_status_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (asq *AgentStatusQuery) loadAgentStatusToBuild(ctx context.Context, query *BuildQuery, nodes []*AgentStatus, init func(*AgentStatus), assign func(*AgentStatus, *Build)) error { + ids := make([]uuid.UUID, 
0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AgentStatus) + for i := range nodes { + if nodes[i].agent_status_agent_status_to_build == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AgentStatusToBuild = n - } + fk := *nodes[i].agent_status_agent_status_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - return nodes, nil + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_build" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (asq *AgentStatusQuery) sqlCount(ctx context.Context) (int, error) { _spec := asq.querySpec() - _spec.Node.Columns = asq.fields - if len(asq.fields) > 0 { - _spec.Unique = asq.unique != nil && *asq.unique + if len(asq.modifiers) > 0 { + _spec.Modifiers = asq.modifiers } - return sqlgraph.CountNodes(ctx, asq.driver, _spec) -} - -func (asq *AgentStatusQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := asq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = asq.ctx.Fields + if len(asq.ctx.Fields) > 0 { + _spec.Unique = asq.ctx.Unique != nil && *asq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, asq.driver, _spec) } func (asq *AgentStatusQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: agentstatus.Table, - Columns: agentstatus.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, - }, - From: asq.sql, - Unique: true, - } - if unique := asq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(agentstatus.Table, agentstatus.Columns, sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID)) + _spec.From = asq.sql + if unique := asq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if asq.path != nil { + _spec.Unique = true } - if fields := asq.fields; len(fields) > 0 { + if fields := asq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, agentstatus.FieldID) for i := range fields { @@ -596,10 +638,10 @@ func (asq *AgentStatusQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := asq.limit; limit != nil { + if limit := asq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := asq.offset; offset != nil { + if offset := asq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := asq.order; len(ps) > 0 { @@ -615,7 +657,7 @@ func (asq *AgentStatusQuery) querySpec() *sqlgraph.QuerySpec { func (asq *AgentStatusQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(asq.driver.Dialect()) t1 := builder.Table(agentstatus.Table) - columns := asq.fields + columns := asq.ctx.Fields if len(columns) == 0 { columns = agentstatus.Columns } @@ -624,7 +666,7 @@ func (asq *AgentStatusQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = asq.sql selector.Select(selector.Columns(columns...)...) 
} - if asq.unique != nil && *asq.unique { + if asq.ctx.Unique != nil && *asq.ctx.Unique { selector.Distinct() } for _, p := range asq.predicates { @@ -633,12 +675,12 @@ func (asq *AgentStatusQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range asq.order { p(selector) } - if offset := asq.offset; offset != nil { + if offset := asq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := asq.limit; limit != nil { + if limit := asq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -646,12 +688,8 @@ func (asq *AgentStatusQuery) sqlQuery(ctx context.Context) *sql.Selector { // AgentStatusGroupBy is the group-by builder for AgentStatus entities. type AgentStatusGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *AgentStatusQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -660,471 +698,77 @@ func (asgb *AgentStatusGroupBy) Aggregate(fns ...AggregateFunc) *AgentStatusGrou return asgb } -// Scan applies the group-by query and scans the result into the given value. -func (asgb *AgentStatusGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := asgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (asgb *AgentStatusGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, asgb.build.ctx, "GroupBy") + if err := asgb.build.prepareQuery(ctx); err != nil { return err } - asgb.sql = query - return asgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := asgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (asgb *AgentStatusGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(asgb.fields) > 1 { - return nil, errors.New("ent: AgentStatusGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := asgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) StringsX(ctx context.Context) []string { - v, err := asgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (asgb *AgentStatusGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = asgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) StringX(ctx context.Context) string { - v, err := asgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (asgb *AgentStatusGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(asgb.fields) > 1 { - return nil, errors.New("ent: AgentStatusGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := asgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*AgentStatusQuery, *AgentStatusGroupBy](ctx, asgb.build, asgb, asgb.build.inters, v) } -// IntsX is like Ints, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) IntsX(ctx context.Context) []int { - v, err := asgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (asgb *AgentStatusGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = asgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) IntX(ctx context.Context) int { - v, err := asgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (asgb *AgentStatusGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(asgb.fields) > 1 { - return nil, errors.New("ent: AgentStatusGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := asgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := asgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (asgb *AgentStatusGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = asgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) Float64X(ctx context.Context) float64 { - v, err := asgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (asgb *AgentStatusGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(asgb.fields) > 1 { - return nil, errors.New("ent: AgentStatusGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := asgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) BoolsX(ctx context.Context) []bool { - v, err := asgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (asgb *AgentStatusGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = asgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (asgb *AgentStatusGroupBy) BoolX(ctx context.Context) bool { - v, err := asgb.Bool(ctx) - if err != nil { - panic(err) +func (asgb *AgentStatusGroupBy) sqlScan(ctx context.Context, root *AgentStatusQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(asgb.fns)) + for _, fn := range asgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (asgb *AgentStatusGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range asgb.fields { - if !agentstatus.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*asgb.flds)+len(asgb.fns)) + for _, f := range *asgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := asgb.sqlQuery() + selector.GroupBy(selector.Columns(*asgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := asgb.driver.Query(ctx, query, args, rows); err != nil { + if err := asgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (asgb *AgentStatusGroupBy) sqlQuery() *sql.Selector { - selector := asgb.sql.Select() - aggregation := make([]string, 0, len(asgb.fns)) - for _, fn := range asgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(asgb.fields)+len(asgb.fns)) - for _, f := range asgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(asgb.fields...)...) -} - // AgentStatusSelect is the builder for selecting fields of AgentStatus entities. type AgentStatusSelect struct { *AgentStatusQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ass *AgentStatusSelect) Aggregate(fns ...AggregateFunc) *AgentStatusSelect { + ass.fns = append(ass.fns, fns...) + return ass } // Scan applies the selector query and scans the result into the given value. -func (ass *AgentStatusSelect) Scan(ctx context.Context, v interface{}) error { +func (ass *AgentStatusSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ass.ctx, "Select") if err := ass.prepareQuery(ctx); err != nil { return err } - ass.sql = ass.AgentStatusQuery.sqlQuery(ctx) - return ass.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. 
-func (ass *AgentStatusSelect) ScanX(ctx context.Context, v interface{}) { - if err := ass.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) Strings(ctx context.Context) ([]string, error) { - if len(ass.fields) > 1 { - return nil, errors.New("ent: AgentStatusSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ass.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ass *AgentStatusSelect) StringsX(ctx context.Context) []string { - v, err := ass.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ass.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ass *AgentStatusSelect) StringX(ctx context.Context) string { - v, err := ass.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) Ints(ctx context.Context) ([]int, error) { - if len(ass.fields) > 1 { - return nil, errors.New("ent: AgentStatusSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ass.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ass *AgentStatusSelect) IntsX(ctx context.Context) []int { - v, err := ass.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ass.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ass *AgentStatusSelect) IntX(ctx context.Context) int { - v, err := ass.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ass.fields) > 1 { - return nil, errors.New("ent: AgentStatusSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ass.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ass *AgentStatusSelect) Float64sX(ctx context.Context) []float64 { - v, err := ass.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (ass *AgentStatusSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ass.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ass *AgentStatusSelect) Float64X(ctx context.Context) float64 { - v, err := ass.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ass.fields) > 1 { - return nil, errors.New("ent: AgentStatusSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ass.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*AgentStatusQuery, *AgentStatusSelect](ctx, ass.AgentStatusQuery, ass, ass.inters, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (ass *AgentStatusSelect) BoolsX(ctx context.Context) []bool { - v, err := ass.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ass *AgentStatusSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ass.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agentstatus.Label} - default: - err = fmt.Errorf("ent: AgentStatusSelect.Bools returned %d results when one was expected", len(v)) +func (ass *AgentStatusSelect) sqlScan(ctx context.Context, root *AgentStatusQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ass.fns)) + for _, fn := range ass.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ass *AgentStatusSelect) BoolX(ctx context.Context) bool { - v, err := ass.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ass.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ass *AgentStatusSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ass.sql.Query() + query, args := selector.Query() if err := ass.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/agentstatus_update.go b/ent/agentstatus_update.go index 18f952a8..48c77364 100755 --- a/ent/agentstatus_update.go +++ b/ent/agentstatus_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -37,12 +37,28 @@ func (asu *AgentStatusUpdate) SetClientID(s string) *AgentStatusUpdate { return asu } +// SetNillableClientID sets the "ClientID" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableClientID(s *string) *AgentStatusUpdate { + if s != nil { + asu.SetClientID(*s) + } + return asu +} + // SetHostname sets the "Hostname" field. func (asu *AgentStatusUpdate) SetHostname(s string) *AgentStatusUpdate { asu.mutation.SetHostname(s) return asu } +// SetNillableHostname sets the "Hostname" field if the given value is not nil. 
+func (asu *AgentStatusUpdate) SetNillableHostname(s *string) *AgentStatusUpdate { + if s != nil { + asu.SetHostname(*s) + } + return asu +} + // SetUpTime sets the "UpTime" field. func (asu *AgentStatusUpdate) SetUpTime(i int64) *AgentStatusUpdate { asu.mutation.ResetUpTime() @@ -50,6 +66,14 @@ func (asu *AgentStatusUpdate) SetUpTime(i int64) *AgentStatusUpdate { return asu } +// SetNillableUpTime sets the "UpTime" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableUpTime(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetUpTime(*i) + } + return asu +} + // AddUpTime adds i to the "UpTime" field. func (asu *AgentStatusUpdate) AddUpTime(i int64) *AgentStatusUpdate { asu.mutation.AddUpTime(i) @@ -63,6 +87,14 @@ func (asu *AgentStatusUpdate) SetBootTime(i int64) *AgentStatusUpdate { return asu } +// SetNillableBootTime sets the "BootTime" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableBootTime(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetBootTime(*i) + } + return asu +} + // AddBootTime adds i to the "BootTime" field. func (asu *AgentStatusUpdate) AddBootTime(i int64) *AgentStatusUpdate { asu.mutation.AddBootTime(i) @@ -76,6 +108,14 @@ func (asu *AgentStatusUpdate) SetNumProcs(i int64) *AgentStatusUpdate { return asu } +// SetNillableNumProcs sets the "NumProcs" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableNumProcs(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetNumProcs(*i) + } + return asu +} + // AddNumProcs adds i to the "NumProcs" field. func (asu *AgentStatusUpdate) AddNumProcs(i int64) *AgentStatusUpdate { asu.mutation.AddNumProcs(i) @@ -88,12 +128,28 @@ func (asu *AgentStatusUpdate) SetOs(s string) *AgentStatusUpdate { return asu } +// SetNillableOs sets the "Os" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableOs(s *string) *AgentStatusUpdate { + if s != nil { + asu.SetOs(*s) + } + return asu +} + // SetHostID sets the "HostID" field. func (asu *AgentStatusUpdate) SetHostID(s string) *AgentStatusUpdate { asu.mutation.SetHostID(s) return asu } +// SetNillableHostID sets the "HostID" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableHostID(s *string) *AgentStatusUpdate { + if s != nil { + asu.SetHostID(*s) + } + return asu +} + // SetLoad1 sets the "Load1" field. func (asu *AgentStatusUpdate) SetLoad1(f float64) *AgentStatusUpdate { asu.mutation.ResetLoad1() @@ -101,6 +157,14 @@ func (asu *AgentStatusUpdate) SetLoad1(f float64) *AgentStatusUpdate { return asu } +// SetNillableLoad1 sets the "Load1" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableLoad1(f *float64) *AgentStatusUpdate { + if f != nil { + asu.SetLoad1(*f) + } + return asu +} + // AddLoad1 adds f to the "Load1" field. func (asu *AgentStatusUpdate) AddLoad1(f float64) *AgentStatusUpdate { asu.mutation.AddLoad1(f) @@ -114,6 +178,14 @@ func (asu *AgentStatusUpdate) SetLoad5(f float64) *AgentStatusUpdate { return asu } +// SetNillableLoad5 sets the "Load5" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableLoad5(f *float64) *AgentStatusUpdate { + if f != nil { + asu.SetLoad5(*f) + } + return asu +} + // AddLoad5 adds f to the "Load5" field. 
func (asu *AgentStatusUpdate) AddLoad5(f float64) *AgentStatusUpdate { asu.mutation.AddLoad5(f) @@ -127,6 +199,14 @@ func (asu *AgentStatusUpdate) SetLoad15(f float64) *AgentStatusUpdate { return asu } +// SetNillableLoad15 sets the "Load15" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableLoad15(f *float64) *AgentStatusUpdate { + if f != nil { + asu.SetLoad15(*f) + } + return asu +} + // AddLoad15 adds f to the "Load15" field. func (asu *AgentStatusUpdate) AddLoad15(f float64) *AgentStatusUpdate { asu.mutation.AddLoad15(f) @@ -140,6 +220,14 @@ func (asu *AgentStatusUpdate) SetTotalMem(i int64) *AgentStatusUpdate { return asu } +// SetNillableTotalMem sets the "TotalMem" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableTotalMem(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetTotalMem(*i) + } + return asu +} + // AddTotalMem adds i to the "TotalMem" field. func (asu *AgentStatusUpdate) AddTotalMem(i int64) *AgentStatusUpdate { asu.mutation.AddTotalMem(i) @@ -153,6 +241,14 @@ func (asu *AgentStatusUpdate) SetFreeMem(i int64) *AgentStatusUpdate { return asu } +// SetNillableFreeMem sets the "FreeMem" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableFreeMem(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetFreeMem(*i) + } + return asu +} + // AddFreeMem adds i to the "FreeMem" field. func (asu *AgentStatusUpdate) AddFreeMem(i int64) *AgentStatusUpdate { asu.mutation.AddFreeMem(i) @@ -166,6 +262,14 @@ func (asu *AgentStatusUpdate) SetUsedMem(i int64) *AgentStatusUpdate { return asu } +// SetNillableUsedMem sets the "UsedMem" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableUsedMem(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetUsedMem(*i) + } + return asu +} + // AddUsedMem adds i to the "UsedMem" field. func (asu *AgentStatusUpdate) AddUsedMem(i int64) *AgentStatusUpdate { asu.mutation.AddUsedMem(i) @@ -179,6 +283,14 @@ func (asu *AgentStatusUpdate) SetTimestamp(i int64) *AgentStatusUpdate { return asu } +// SetNillableTimestamp sets the "Timestamp" field if the given value is not nil. +func (asu *AgentStatusUpdate) SetNillableTimestamp(i *int64) *AgentStatusUpdate { + if i != nil { + asu.SetTimestamp(*i) + } + return asu +} + // AddTimestamp adds i to the "Timestamp" field. func (asu *AgentStatusUpdate) AddTimestamp(i int64) *AgentStatusUpdate { asu.mutation.AddTimestamp(i) @@ -267,34 +379,7 @@ func (asu *AgentStatusUpdate) ClearAgentStatusToBuild() *AgentStatusUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (asu *AgentStatusUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(asu.hooks) == 0 { - affected, err = asu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentStatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - asu.mutation = mutation - affected, err = asu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(asu.hooks) - 1; i >= 0; i-- { - if asu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = asu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, asu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, asu.sqlSave, asu.mutation, asu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -320,16 +405,7 @@ func (asu *AgentStatusUpdate) ExecX(ctx context.Context) { } func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: agentstatus.Table, - Columns: agentstatus.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(agentstatus.Table, agentstatus.Columns, sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID)) if ps := asu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -338,172 +414,76 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := asu.mutation.ClientID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldClientID, - }) + _spec.SetField(agentstatus.FieldClientID, field.TypeString, value) } if value, ok := asu.mutation.Hostname(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldHostname, - }) + _spec.SetField(agentstatus.FieldHostname, field.TypeString, value) } if value, ok := asu.mutation.UpTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUpTime, - }) + _spec.SetField(agentstatus.FieldUpTime, field.TypeInt64, value) } if value, ok := asu.mutation.AddedUpTime(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUpTime, - }) + _spec.AddField(agentstatus.FieldUpTime, field.TypeInt64, value) } if value, ok := asu.mutation.BootTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldBootTime, - }) + _spec.SetField(agentstatus.FieldBootTime, field.TypeInt64, value) } if value, ok := asu.mutation.AddedBootTime(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldBootTime, - }) + _spec.AddField(agentstatus.FieldBootTime, field.TypeInt64, value) } if value, ok := asu.mutation.NumProcs(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldNumProcs, - }) + _spec.SetField(agentstatus.FieldNumProcs, field.TypeInt64, value) } if value, ok := 
asu.mutation.AddedNumProcs(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldNumProcs, - }) + _spec.AddField(agentstatus.FieldNumProcs, field.TypeInt64, value) } if value, ok := asu.mutation.Os(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldOs, - }) + _spec.SetField(agentstatus.FieldOs, field.TypeString, value) } if value, ok := asu.mutation.HostID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldHostID, - }) + _spec.SetField(agentstatus.FieldHostID, field.TypeString, value) } if value, ok := asu.mutation.Load1(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad1, - }) + _spec.SetField(agentstatus.FieldLoad1, field.TypeFloat64, value) } if value, ok := asu.mutation.AddedLoad1(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad1, - }) + _spec.AddField(agentstatus.FieldLoad1, field.TypeFloat64, value) } if value, ok := asu.mutation.Load5(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad5, - }) + _spec.SetField(agentstatus.FieldLoad5, field.TypeFloat64, value) } if value, ok := asu.mutation.AddedLoad5(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad5, - }) + _spec.AddField(agentstatus.FieldLoad5, field.TypeFloat64, value) } if value, ok := asu.mutation.Load15(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad15, - }) + _spec.SetField(agentstatus.FieldLoad15, field.TypeFloat64, value) } if value, ok := asu.mutation.AddedLoad15(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad15, - }) + _spec.AddField(agentstatus.FieldLoad15, field.TypeFloat64, value) } if value, ok := asu.mutation.TotalMem(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTotalMem, - }) + _spec.SetField(agentstatus.FieldTotalMem, field.TypeInt64, value) } if value, ok := asu.mutation.AddedTotalMem(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTotalMem, - }) + _spec.AddField(agentstatus.FieldTotalMem, field.TypeInt64, value) } if value, ok := asu.mutation.FreeMem(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldFreeMem, - }) + _spec.SetField(agentstatus.FieldFreeMem, field.TypeInt64, value) } if value, ok := asu.mutation.AddedFreeMem(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldFreeMem, - }) + _spec.AddField(agentstatus.FieldFreeMem, field.TypeInt64, value) } if value, ok := asu.mutation.UsedMem(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: 
field.TypeInt64, - Value: value, - Column: agentstatus.FieldUsedMem, - }) + _spec.SetField(agentstatus.FieldUsedMem, field.TypeInt64, value) } if value, ok := asu.mutation.AddedUsedMem(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUsedMem, - }) + _spec.AddField(agentstatus.FieldUsedMem, field.TypeInt64, value) } if value, ok := asu.mutation.Timestamp(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTimestamp, - }) + _spec.SetField(agentstatus.FieldTimestamp, field.TypeInt64, value) } if value, ok := asu.mutation.AddedTimestamp(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTimestamp, - }) + _spec.AddField(agentstatus.FieldTimestamp, field.TypeInt64, value) } if asu.mutation.AgentStatusToProvisionedHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -513,10 +493,7 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agentstatus.AgentStatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -529,10 +506,7 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agentstatus.AgentStatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -548,10 +522,7 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agentstatus.AgentStatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -564,10 +535,7 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agentstatus.AgentStatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -583,10 +551,7 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agentstatus.AgentStatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -599,10 +564,7 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agentstatus.AgentStatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -614,10 
+576,11 @@ func (asu *AgentStatusUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{agentstatus.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + asu.mutation.done = true return n, nil } @@ -635,12 +598,28 @@ func (asuo *AgentStatusUpdateOne) SetClientID(s string) *AgentStatusUpdateOne { return asuo } +// SetNillableClientID sets the "ClientID" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableClientID(s *string) *AgentStatusUpdateOne { + if s != nil { + asuo.SetClientID(*s) + } + return asuo +} + // SetHostname sets the "Hostname" field. func (asuo *AgentStatusUpdateOne) SetHostname(s string) *AgentStatusUpdateOne { asuo.mutation.SetHostname(s) return asuo } +// SetNillableHostname sets the "Hostname" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableHostname(s *string) *AgentStatusUpdateOne { + if s != nil { + asuo.SetHostname(*s) + } + return asuo +} + // SetUpTime sets the "UpTime" field. func (asuo *AgentStatusUpdateOne) SetUpTime(i int64) *AgentStatusUpdateOne { asuo.mutation.ResetUpTime() @@ -648,6 +627,14 @@ func (asuo *AgentStatusUpdateOne) SetUpTime(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableUpTime sets the "UpTime" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableUpTime(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetUpTime(*i) + } + return asuo +} + // AddUpTime adds i to the "UpTime" field. func (asuo *AgentStatusUpdateOne) AddUpTime(i int64) *AgentStatusUpdateOne { asuo.mutation.AddUpTime(i) @@ -661,6 +648,14 @@ func (asuo *AgentStatusUpdateOne) SetBootTime(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableBootTime sets the "BootTime" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableBootTime(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetBootTime(*i) + } + return asuo +} + // AddBootTime adds i to the "BootTime" field. func (asuo *AgentStatusUpdateOne) AddBootTime(i int64) *AgentStatusUpdateOne { asuo.mutation.AddBootTime(i) @@ -674,6 +669,14 @@ func (asuo *AgentStatusUpdateOne) SetNumProcs(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableNumProcs sets the "NumProcs" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableNumProcs(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetNumProcs(*i) + } + return asuo +} + // AddNumProcs adds i to the "NumProcs" field. func (asuo *AgentStatusUpdateOne) AddNumProcs(i int64) *AgentStatusUpdateOne { asuo.mutation.AddNumProcs(i) @@ -686,12 +689,28 @@ func (asuo *AgentStatusUpdateOne) SetOs(s string) *AgentStatusUpdateOne { return asuo } +// SetNillableOs sets the "Os" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableOs(s *string) *AgentStatusUpdateOne { + if s != nil { + asuo.SetOs(*s) + } + return asuo +} + // SetHostID sets the "HostID" field. func (asuo *AgentStatusUpdateOne) SetHostID(s string) *AgentStatusUpdateOne { asuo.mutation.SetHostID(s) return asuo } +// SetNillableHostID sets the "HostID" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableHostID(s *string) *AgentStatusUpdateOne { + if s != nil { + asuo.SetHostID(*s) + } + return asuo +} + // SetLoad1 sets the "Load1" field. 
func (asuo *AgentStatusUpdateOne) SetLoad1(f float64) *AgentStatusUpdateOne { asuo.mutation.ResetLoad1() @@ -699,6 +718,14 @@ func (asuo *AgentStatusUpdateOne) SetLoad1(f float64) *AgentStatusUpdateOne { return asuo } +// SetNillableLoad1 sets the "Load1" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableLoad1(f *float64) *AgentStatusUpdateOne { + if f != nil { + asuo.SetLoad1(*f) + } + return asuo +} + // AddLoad1 adds f to the "Load1" field. func (asuo *AgentStatusUpdateOne) AddLoad1(f float64) *AgentStatusUpdateOne { asuo.mutation.AddLoad1(f) @@ -712,6 +739,14 @@ func (asuo *AgentStatusUpdateOne) SetLoad5(f float64) *AgentStatusUpdateOne { return asuo } +// SetNillableLoad5 sets the "Load5" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableLoad5(f *float64) *AgentStatusUpdateOne { + if f != nil { + asuo.SetLoad5(*f) + } + return asuo +} + // AddLoad5 adds f to the "Load5" field. func (asuo *AgentStatusUpdateOne) AddLoad5(f float64) *AgentStatusUpdateOne { asuo.mutation.AddLoad5(f) @@ -725,6 +760,14 @@ func (asuo *AgentStatusUpdateOne) SetLoad15(f float64) *AgentStatusUpdateOne { return asuo } +// SetNillableLoad15 sets the "Load15" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableLoad15(f *float64) *AgentStatusUpdateOne { + if f != nil { + asuo.SetLoad15(*f) + } + return asuo +} + // AddLoad15 adds f to the "Load15" field. func (asuo *AgentStatusUpdateOne) AddLoad15(f float64) *AgentStatusUpdateOne { asuo.mutation.AddLoad15(f) @@ -738,6 +781,14 @@ func (asuo *AgentStatusUpdateOne) SetTotalMem(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableTotalMem sets the "TotalMem" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableTotalMem(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetTotalMem(*i) + } + return asuo +} + // AddTotalMem adds i to the "TotalMem" field. func (asuo *AgentStatusUpdateOne) AddTotalMem(i int64) *AgentStatusUpdateOne { asuo.mutation.AddTotalMem(i) @@ -751,6 +802,14 @@ func (asuo *AgentStatusUpdateOne) SetFreeMem(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableFreeMem sets the "FreeMem" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableFreeMem(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetFreeMem(*i) + } + return asuo +} + // AddFreeMem adds i to the "FreeMem" field. func (asuo *AgentStatusUpdateOne) AddFreeMem(i int64) *AgentStatusUpdateOne { asuo.mutation.AddFreeMem(i) @@ -764,6 +823,14 @@ func (asuo *AgentStatusUpdateOne) SetUsedMem(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableUsedMem sets the "UsedMem" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableUsedMem(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetUsedMem(*i) + } + return asuo +} + // AddUsedMem adds i to the "UsedMem" field. func (asuo *AgentStatusUpdateOne) AddUsedMem(i int64) *AgentStatusUpdateOne { asuo.mutation.AddUsedMem(i) @@ -777,6 +844,14 @@ func (asuo *AgentStatusUpdateOne) SetTimestamp(i int64) *AgentStatusUpdateOne { return asuo } +// SetNillableTimestamp sets the "Timestamp" field if the given value is not nil. +func (asuo *AgentStatusUpdateOne) SetNillableTimestamp(i *int64) *AgentStatusUpdateOne { + if i != nil { + asuo.SetTimestamp(*i) + } + return asuo +} + // AddTimestamp adds i to the "Timestamp" field. 
func (asuo *AgentStatusUpdateOne) AddTimestamp(i int64) *AgentStatusUpdateOne { asuo.mutation.AddTimestamp(i) @@ -863,6 +938,12 @@ func (asuo *AgentStatusUpdateOne) ClearAgentStatusToBuild() *AgentStatusUpdateOn return asuo } +// Where appends a list predicates to the AgentStatusUpdate builder. +func (asuo *AgentStatusUpdateOne) Where(ps ...predicate.AgentStatus) *AgentStatusUpdateOne { + asuo.mutation.Where(ps...) + return asuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (asuo *AgentStatusUpdateOne) Select(field string, fields ...string) *AgentStatusUpdateOne { @@ -872,34 +953,7 @@ func (asuo *AgentStatusUpdateOne) Select(field string, fields ...string) *AgentS // Save executes the query and returns the updated AgentStatus entity. func (asuo *AgentStatusUpdateOne) Save(ctx context.Context) (*AgentStatus, error) { - var ( - err error - node *AgentStatus - ) - if len(asuo.hooks) == 0 { - node, err = asuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentStatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - asuo.mutation = mutation - node, err = asuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(asuo.hooks) - 1; i >= 0; i-- { - if asuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = asuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, asuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, asuo.sqlSave, asuo.mutation, asuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
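For anyone skimming the generated churn in ent/agentstatus_update.go above: the practical effect is that Save now delegates to the shared withHooks helper, sqlSave builds its spec through sqlgraph.NewUpdateSpec / SetField / AddField, and the one-record builder gains Where plus SetNillable* setters. A minimal caller-side sketch follows; it is illustration only, not part of this change set. The client, ctx, id, hostname values and the touchAgentStatus helper are assumptions, and UpdateOneID is the standard ent-generated entry point rather than something shown in this diff.

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// touchAgentStatus is a hypothetical helper, not part of this change set.
func touchAgentStatus(ctx context.Context, client *ent.Client, id uuid.UUID, hostname *string) error {
	_, err := client.AgentStatus.UpdateOneID(id). // standard generated entry point (not shown in this diff)
		SetNillableHostname(hostname).            // no-op when hostname == nil, per the new SetNillable* setters
		AddUpTime(60).                            // lands in the spec via AddField in the rewritten sqlSave
		Save(ctx)                                 // now implemented as withHooks(ctx, sqlSave, mutation, hooks)
	return err
}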
@@ -925,16 +979,7 @@ func (asuo *AgentStatusUpdateOne) ExecX(ctx context.Context) { } func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStatus, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: agentstatus.Table, - Columns: agentstatus.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(agentstatus.Table, agentstatus.Columns, sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID)) id, ok := asuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AgentStatus.id" for update`)} @@ -960,172 +1005,76 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat } } if value, ok := asuo.mutation.ClientID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldClientID, - }) + _spec.SetField(agentstatus.FieldClientID, field.TypeString, value) } if value, ok := asuo.mutation.Hostname(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldHostname, - }) + _spec.SetField(agentstatus.FieldHostname, field.TypeString, value) } if value, ok := asuo.mutation.UpTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUpTime, - }) + _spec.SetField(agentstatus.FieldUpTime, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedUpTime(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUpTime, - }) + _spec.AddField(agentstatus.FieldUpTime, field.TypeInt64, value) } if value, ok := asuo.mutation.BootTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldBootTime, - }) + _spec.SetField(agentstatus.FieldBootTime, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedBootTime(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldBootTime, - }) + _spec.AddField(agentstatus.FieldBootTime, field.TypeInt64, value) } if value, ok := asuo.mutation.NumProcs(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldNumProcs, - }) + _spec.SetField(agentstatus.FieldNumProcs, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedNumProcs(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldNumProcs, - }) + _spec.AddField(agentstatus.FieldNumProcs, field.TypeInt64, value) } if value, ok := asuo.mutation.Os(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldOs, - }) + _spec.SetField(agentstatus.FieldOs, field.TypeString, value) } if value, ok := asuo.mutation.HostID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agentstatus.FieldHostID, - }) + _spec.SetField(agentstatus.FieldHostID, field.TypeString, value) } if value, ok := asuo.mutation.Load1(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - 
Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad1, - }) + _spec.SetField(agentstatus.FieldLoad1, field.TypeFloat64, value) } if value, ok := asuo.mutation.AddedLoad1(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad1, - }) + _spec.AddField(agentstatus.FieldLoad1, field.TypeFloat64, value) } if value, ok := asuo.mutation.Load5(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad5, - }) + _spec.SetField(agentstatus.FieldLoad5, field.TypeFloat64, value) } if value, ok := asuo.mutation.AddedLoad5(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad5, - }) + _spec.AddField(agentstatus.FieldLoad5, field.TypeFloat64, value) } if value, ok := asuo.mutation.Load15(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad15, - }) + _spec.SetField(agentstatus.FieldLoad15, field.TypeFloat64, value) } if value, ok := asuo.mutation.AddedLoad15(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat64, - Value: value, - Column: agentstatus.FieldLoad15, - }) + _spec.AddField(agentstatus.FieldLoad15, field.TypeFloat64, value) } if value, ok := asuo.mutation.TotalMem(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTotalMem, - }) + _spec.SetField(agentstatus.FieldTotalMem, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedTotalMem(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTotalMem, - }) + _spec.AddField(agentstatus.FieldTotalMem, field.TypeInt64, value) } if value, ok := asuo.mutation.FreeMem(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldFreeMem, - }) + _spec.SetField(agentstatus.FieldFreeMem, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedFreeMem(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldFreeMem, - }) + _spec.AddField(agentstatus.FieldFreeMem, field.TypeInt64, value) } if value, ok := asuo.mutation.UsedMem(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUsedMem, - }) + _spec.SetField(agentstatus.FieldUsedMem, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedUsedMem(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldUsedMem, - }) + _spec.AddField(agentstatus.FieldUsedMem, field.TypeInt64, value) } if value, ok := asuo.mutation.Timestamp(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTimestamp, - }) + _spec.SetField(agentstatus.FieldTimestamp, field.TypeInt64, value) } if value, ok := asuo.mutation.AddedTimestamp(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: agentstatus.FieldTimestamp, 
- }) + _spec.AddField(agentstatus.FieldTimestamp, field.TypeInt64, value) } if asuo.mutation.AgentStatusToProvisionedHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1135,10 +1084,7 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat Columns: []string{agentstatus.AgentStatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1151,10 +1097,7 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat Columns: []string{agentstatus.AgentStatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1170,10 +1113,7 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat Columns: []string{agentstatus.AgentStatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1186,10 +1126,7 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat Columns: []string{agentstatus.AgentStatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1205,10 +1142,7 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat Columns: []string{agentstatus.AgentStatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1221,10 +1155,7 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat Columns: []string{agentstatus.AgentStatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1239,9 +1170,10 @@ func (asuo *AgentStatusUpdateOne) sqlSave(ctx context.Context) (_node *AgentStat if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{agentstatus.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + asuo.mutation.done = true return _node, nil } diff --git a/ent/agenttask.go b/ent/agenttask.go index 8629ddda..89fd32d0 100755 --- a/ent/agenttask.go +++ b/ent/agenttask.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/agenttask" "github.com/gen0cide/laforge/ent/provisionedhost" @@ -34,6 +35,7 @@ type AgentTask struct { // The values are being populated by the AgentTaskQuery when eager-loading is set. Edges AgentTaskEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // AgentTaskToProvisioningStep holds the value of the AgentTaskToProvisioningStep edge. HCLAgentTaskToProvisioningStep *ProvisioningStep `json:"AgentTaskToProvisioningStep,omitempty"` @@ -41,9 +43,10 @@ type AgentTask struct { HCLAgentTaskToProvisionedHost *ProvisionedHost `json:"AgentTaskToProvisionedHost,omitempty"` // AgentTaskToAdhocPlan holds the value of the AgentTaskToAdhocPlan edge. HCLAgentTaskToAdhocPlan []*AdhocPlan `json:"AgentTaskToAdhocPlan,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ agent_task_agent_task_to_provisioning_step *uuid.UUID agent_task_agent_task_to_provisioned_host *uuid.UUID + selectValues sql.SelectValues } // AgentTaskEdges holds the relations/edges for other nodes in the graph. @@ -57,6 +60,10 @@ type AgentTaskEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [3]bool + // totalCount holds the count of the edges above. + totalCount [3]map[string]int + + namedAgentTaskToAdhocPlan map[string][]*AdhocPlan } // AgentTaskToProvisioningStepOrErr returns the AgentTaskToProvisioningStep value or an error if the edge @@ -64,8 +71,7 @@ type AgentTaskEdges struct { func (e AgentTaskEdges) AgentTaskToProvisioningStepOrErr() (*ProvisioningStep, error) { if e.loadedTypes[0] { if e.AgentTaskToProvisioningStep == nil { - // The edge AgentTaskToProvisioningStep was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisioningstep.Label} } return e.AgentTaskToProvisioningStep, nil @@ -78,8 +84,7 @@ func (e AgentTaskEdges) AgentTaskToProvisioningStepOrErr() (*ProvisioningStep, e func (e AgentTaskEdges) AgentTaskToProvisionedHostOrErr() (*ProvisionedHost, error) { if e.loadedTypes[1] { if e.AgentTaskToProvisionedHost == nil { - // The edge AgentTaskToProvisionedHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionedhost.Label} } return e.AgentTaskToProvisionedHost, nil @@ -97,8 +102,8 @@ func (e AgentTaskEdges) AgentTaskToAdhocPlanOrErr() ([]*AdhocPlan, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*AgentTask) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*AgentTask) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case agenttask.FieldNumber: @@ -112,7 +117,7 @@ func (*AgentTask) scanValues(columns []string) ([]interface{}, error) { case agenttask.ForeignKeys[1]: // agent_task_agent_task_to_provisioned_host values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type AgentTask", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -120,7 +125,7 @@ func (*AgentTask) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the AgentTask fields. 
-func (at *AgentTask) assignValues(columns []string, values []interface{}) error { +func (at *AgentTask) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -182,41 +187,49 @@ func (at *AgentTask) assignValues(columns []string, values []interface{}) error at.agent_task_agent_task_to_provisioned_host = new(uuid.UUID) *at.agent_task_agent_task_to_provisioned_host = *value.S.(*uuid.UUID) } + default: + at.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the AgentTask. +// This includes values selected through modifiers, order, etc. +func (at *AgentTask) Value(name string) (ent.Value, error) { + return at.selectValues.Get(name) +} + // QueryAgentTaskToProvisioningStep queries the "AgentTaskToProvisioningStep" edge of the AgentTask entity. func (at *AgentTask) QueryAgentTaskToProvisioningStep() *ProvisioningStepQuery { - return (&AgentTaskClient{config: at.config}).QueryAgentTaskToProvisioningStep(at) + return NewAgentTaskClient(at.config).QueryAgentTaskToProvisioningStep(at) } // QueryAgentTaskToProvisionedHost queries the "AgentTaskToProvisionedHost" edge of the AgentTask entity. func (at *AgentTask) QueryAgentTaskToProvisionedHost() *ProvisionedHostQuery { - return (&AgentTaskClient{config: at.config}).QueryAgentTaskToProvisionedHost(at) + return NewAgentTaskClient(at.config).QueryAgentTaskToProvisionedHost(at) } // QueryAgentTaskToAdhocPlan queries the "AgentTaskToAdhocPlan" edge of the AgentTask entity. func (at *AgentTask) QueryAgentTaskToAdhocPlan() *AdhocPlanQuery { - return (&AgentTaskClient{config: at.config}).QueryAgentTaskToAdhocPlan(at) + return NewAgentTaskClient(at.config).QueryAgentTaskToAdhocPlan(at) } // Update returns a builder for updating this AgentTask. // Note that you need to call AgentTask.Unwrap() before calling this method if this AgentTask // was returned from a transaction, and the transaction was committed or rolled back. func (at *AgentTask) Update() *AgentTaskUpdateOne { - return (&AgentTaskClient{config: at.config}).UpdateOne(at) + return NewAgentTaskClient(at.config).UpdateOne(at) } // Unwrap unwraps the AgentTask entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (at *AgentTask) Unwrap() *AgentTask { - tx, ok := at.config.driver.(*txDriver) + _tx, ok := at.config.driver.(*txDriver) if !ok { panic("ent: AgentTask is not a transactional entity") } - at.config.driver = tx.drv + at.config.driver = _tx.drv return at } @@ -224,28 +237,51 @@ func (at *AgentTask) Unwrap() *AgentTask { func (at *AgentTask) String() string { var builder strings.Builder builder.WriteString("AgentTask(") - builder.WriteString(fmt.Sprintf("id=%v", at.ID)) - builder.WriteString(", command=") + builder.WriteString(fmt.Sprintf("id=%v, ", at.ID)) + builder.WriteString("command=") builder.WriteString(fmt.Sprintf("%v", at.Command)) - builder.WriteString(", args=") + builder.WriteString(", ") + builder.WriteString("args=") builder.WriteString(at.Args) - builder.WriteString(", number=") + builder.WriteString(", ") + builder.WriteString("number=") builder.WriteString(fmt.Sprintf("%v", at.Number)) - builder.WriteString(", output=") + builder.WriteString(", ") + builder.WriteString("output=") builder.WriteString(at.Output) - builder.WriteString(", state=") + builder.WriteString(", ") + builder.WriteString("state=") builder.WriteString(fmt.Sprintf("%v", at.State)) - builder.WriteString(", error_message=") + builder.WriteString(", ") + builder.WriteString("error_message=") builder.WriteString(at.ErrorMessage) builder.WriteByte(')') return builder.String() } -// AgentTasks is a parsable slice of AgentTask. -type AgentTasks []*AgentTask +// NamedAgentTaskToAdhocPlan returns the AgentTaskToAdhocPlan named value or an error if the edge was not +// loaded in eager-loading with this name. +func (at *AgentTask) NamedAgentTaskToAdhocPlan(name string) ([]*AdhocPlan, error) { + if at.Edges.namedAgentTaskToAdhocPlan == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := at.Edges.namedAgentTaskToAdhocPlan[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (at AgentTasks) config(cfg config) { - for _i := range at { - at[_i].config = cfg +func (at *AgentTask) appendNamedAgentTaskToAdhocPlan(name string, edges ...*AdhocPlan) { + if at.Edges.namedAgentTaskToAdhocPlan == nil { + at.Edges.namedAgentTaskToAdhocPlan = make(map[string][]*AdhocPlan) + } + if len(edges) == 0 { + at.Edges.namedAgentTaskToAdhocPlan[name] = []*AdhocPlan{} + } else { + at.Edges.namedAgentTaskToAdhocPlan[name] = append(at.Edges.namedAgentTaskToAdhocPlan[name], edges...) } } + +// AgentTasks is a parsable slice of AgentTask. +type AgentTasks []*AgentTask diff --git a/ent/agenttask/agenttask.go b/ent/agenttask/agenttask.go index 897abd78..e15bd3d3 100755 --- a/ent/agenttask/agenttask.go +++ b/ent/agenttask/agenttask.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package agenttask @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -159,37 +161,124 @@ func StateValidator(s State) error { } } +// OrderOption defines the ordering options for the AgentTask queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCommand orders the results by the command field. +func ByCommand(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCommand, opts...).ToFunc() +} + +// ByArgs orders the results by the args field. 
+func ByArgs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldArgs, opts...).ToFunc() +} + +// ByNumber orders the results by the number field. +func ByNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNumber, opts...).ToFunc() +} + +// ByOutput orders the results by the output field. +func ByOutput(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOutput, opts...).ToFunc() +} + +// ByState orders the results by the state field. +func ByState(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldState, opts...).ToFunc() +} + +// ByErrorMessage orders the results by the error_message field. +func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() +} + +// ByAgentTaskToProvisioningStepField orders the results by AgentTaskToProvisioningStep field. +func ByAgentTaskToProvisioningStepField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAgentTaskToProvisioningStepStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAgentTaskToProvisionedHostField orders the results by AgentTaskToProvisionedHost field. +func ByAgentTaskToProvisionedHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAgentTaskToProvisionedHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAgentTaskToAdhocPlanCount orders the results by AgentTaskToAdhocPlan count. +func ByAgentTaskToAdhocPlanCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAgentTaskToAdhocPlanStep(), opts...) + } +} + +// ByAgentTaskToAdhocPlan orders the results by AgentTaskToAdhocPlan terms. +func ByAgentTaskToAdhocPlan(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAgentTaskToAdhocPlanStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAgentTaskToProvisioningStepStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AgentTaskToProvisioningStepInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AgentTaskToProvisioningStepTable, AgentTaskToProvisioningStepColumn), + ) +} +func newAgentTaskToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AgentTaskToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AgentTaskToProvisionedHostTable, AgentTaskToProvisionedHostColumn), + ) +} +func newAgentTaskToAdhocPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AgentTaskToAdhocPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AgentTaskToAdhocPlanTable, AgentTaskToAdhocPlanColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (c Command) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(c.String())) +func (e Command) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. 
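The OrderOption block being added to ent/agenttask/agenttask.go above gives query callers typed ordering terms (field order, edge-field order, edge counts) instead of raw column strings. A hypothetical usage sketch, assuming the standard generated Query/Order/All surface and an initialized *ent.Client named client; the listTasks helper itself is not part of this diff.

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/agenttask"
)

// listTasks is a hypothetical caller, not part of this change set.
func listTasks(ctx context.Context, client *ent.Client) ([]*ent.AgentTask, error) {
	return client.AgentTask.Query().
		Order(
			agenttask.ByNumber(sql.OrderDesc()),     // order by the "number" column, descending
			agenttask.ByAgentTaskToAdhocPlanCount(), // then by the count of attached AdhocPlans
		).
		All(ctx)
}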
-func (c *Command) UnmarshalGQL(val interface{}) error { +func (e *Command) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *c = Command(str) - if err := CommandValidator(*c); err != nil { + *e = Command(str) + if err := CommandValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Command", str) } return nil } // MarshalGQL implements graphql.Marshaler interface. -func (s State) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(s.String())) +func (e State) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (s *State) UnmarshalGQL(val interface{}) error { +func (e *State) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *s = State(str) - if err := StateValidator(*s); err != nil { + *e = State(str) + if err := StateValidator(*e); err != nil { return fmt.Errorf("%s is not a valid State", str) } return nil diff --git a/ent/agenttask/where.go b/ent/agenttask/where.go index 9539f45a..ee4c92a2 100755 --- a/ent/agenttask/where.go +++ b/ent/agenttask/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package agenttask @@ -11,618 +11,342 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. 
func IDGTE(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.AgentTask(sql.FieldLTE(FieldID, id)) } // Args applies equality check predicate on the "args" field. It's identical to ArgsEQ. func Args(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldArgs, v)) } // Number applies equality check predicate on the "number" field. It's identical to NumberEQ. func Number(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldNumber, v)) } // Output applies equality check predicate on the "output" field. It's identical to OutputEQ. func Output(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldOutput, v)) } // ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. func ErrorMessage(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldErrorMessage, v)) } // CommandEQ applies the EQ predicate on the "command" field. func CommandEQ(v Command) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCommand), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldCommand, v)) } // CommandNEQ applies the NEQ predicate on the "command" field. func CommandNEQ(v Command) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCommand), v)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldCommand, v)) } // CommandIn applies the In predicate on the "command" field. func CommandIn(vs ...Command) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCommand), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldCommand, vs...)) } // CommandNotIn applies the NotIn predicate on the "command" field. func CommandNotIn(vs ...Command) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCommand), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldCommand, vs...)) } // ArgsEQ applies the EQ predicate on the "args" field. func ArgsEQ(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldArgs, v)) } // ArgsNEQ applies the NEQ predicate on the "args" field. func ArgsNEQ(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldArgs, v)) } // ArgsIn applies the In predicate on the "args" field. func ArgsIn(vs ...string) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldArgs), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldArgs, vs...)) } // ArgsNotIn applies the NotIn predicate on the "args" field. func ArgsNotIn(vs ...string) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldArgs), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldArgs, vs...)) } // ArgsGT applies the GT predicate on the "args" field. func ArgsGT(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldGT(FieldArgs, v)) } // ArgsGTE applies the GTE predicate on the "args" field. func ArgsGTE(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldGTE(FieldArgs, v)) } // ArgsLT applies the LT predicate on the "args" field. func ArgsLT(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldLT(FieldArgs, v)) } // ArgsLTE applies the LTE predicate on the "args" field. func ArgsLTE(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldLTE(FieldArgs, v)) } // ArgsContains applies the Contains predicate on the "args" field. func ArgsContains(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldContains(FieldArgs, v)) } // ArgsHasPrefix applies the HasPrefix predicate on the "args" field. func ArgsHasPrefix(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldHasPrefix(FieldArgs, v)) } // ArgsHasSuffix applies the HasSuffix predicate on the "args" field. 
func ArgsHasSuffix(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldHasSuffix(FieldArgs, v)) } // ArgsEqualFold applies the EqualFold predicate on the "args" field. func ArgsEqualFold(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldEqualFold(FieldArgs, v)) } // ArgsContainsFold applies the ContainsFold predicate on the "args" field. func ArgsContainsFold(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldArgs), v)) - }) + return predicate.AgentTask(sql.FieldContainsFold(FieldArgs, v)) } // NumberEQ applies the EQ predicate on the "number" field. func NumberEQ(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldNumber, v)) } // NumberNEQ applies the NEQ predicate on the "number" field. func NumberNEQ(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldNumber, v)) } // NumberIn applies the In predicate on the "number" field. func NumberIn(vs ...int) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldNumber), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldNumber, vs...)) } // NumberNotIn applies the NotIn predicate on the "number" field. func NumberNotIn(vs ...int) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldNumber), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldNumber, vs...)) } // NumberGT applies the GT predicate on the "number" field. func NumberGT(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldGT(FieldNumber, v)) } // NumberGTE applies the GTE predicate on the "number" field. func NumberGTE(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldGTE(FieldNumber, v)) } // NumberLT applies the LT predicate on the "number" field. func NumberLT(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldLT(FieldNumber, v)) } // NumberLTE applies the LTE predicate on the "number" field. func NumberLTE(v int) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldNumber), v)) - }) + return predicate.AgentTask(sql.FieldLTE(FieldNumber, v)) } // OutputEQ applies the EQ predicate on the "output" field. 
func OutputEQ(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldOutput, v)) } // OutputNEQ applies the NEQ predicate on the "output" field. func OutputNEQ(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldOutput, v)) } // OutputIn applies the In predicate on the "output" field. func OutputIn(vs ...string) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldOutput), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldOutput, vs...)) } // OutputNotIn applies the NotIn predicate on the "output" field. func OutputNotIn(vs ...string) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldOutput), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldOutput, vs...)) } // OutputGT applies the GT predicate on the "output" field. func OutputGT(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldGT(FieldOutput, v)) } // OutputGTE applies the GTE predicate on the "output" field. func OutputGTE(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldGTE(FieldOutput, v)) } // OutputLT applies the LT predicate on the "output" field. func OutputLT(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldLT(FieldOutput, v)) } // OutputLTE applies the LTE predicate on the "output" field. func OutputLTE(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldLTE(FieldOutput, v)) } // OutputContains applies the Contains predicate on the "output" field. func OutputContains(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldContains(FieldOutput, v)) } // OutputHasPrefix applies the HasPrefix predicate on the "output" field. func OutputHasPrefix(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldHasPrefix(FieldOutput, v)) } // OutputHasSuffix applies the HasSuffix predicate on the "output" field. 
func OutputHasSuffix(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldHasSuffix(FieldOutput, v)) } // OutputEqualFold applies the EqualFold predicate on the "output" field. func OutputEqualFold(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldEqualFold(FieldOutput, v)) } // OutputContainsFold applies the ContainsFold predicate on the "output" field. func OutputContainsFold(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOutput), v)) - }) + return predicate.AgentTask(sql.FieldContainsFold(FieldOutput, v)) } // StateEQ applies the EQ predicate on the "state" field. func StateEQ(v State) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldState), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldState, v)) } // StateNEQ applies the NEQ predicate on the "state" field. func StateNEQ(v State) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldState), v)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldState, v)) } // StateIn applies the In predicate on the "state" field. func StateIn(vs ...State) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldState), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldState, vs...)) } // StateNotIn applies the NotIn predicate on the "state" field. func StateNotIn(vs ...State) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldState), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldState, vs...)) } // ErrorMessageEQ applies the EQ predicate on the "error_message" field. func ErrorMessageEQ(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldEQ(FieldErrorMessage, v)) } // ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. func ErrorMessageNEQ(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldNEQ(FieldErrorMessage, v)) } // ErrorMessageIn applies the In predicate on the "error_message" field. func ErrorMessageIn(vs ...string) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldErrorMessage), v...)) - }) + return predicate.AgentTask(sql.FieldIn(FieldErrorMessage, vs...)) } // ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. func ErrorMessageNotIn(vs ...string) predicate.AgentTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AgentTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldErrorMessage), v...)) - }) + return predicate.AgentTask(sql.FieldNotIn(FieldErrorMessage, vs...)) } // ErrorMessageGT applies the GT predicate on the "error_message" field. func ErrorMessageGT(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldGT(FieldErrorMessage, v)) } // ErrorMessageGTE applies the GTE predicate on the "error_message" field. func ErrorMessageGTE(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldGTE(FieldErrorMessage, v)) } // ErrorMessageLT applies the LT predicate on the "error_message" field. func ErrorMessageLT(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldLT(FieldErrorMessage, v)) } // ErrorMessageLTE applies the LTE predicate on the "error_message" field. func ErrorMessageLTE(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldLTE(FieldErrorMessage, v)) } // ErrorMessageContains applies the Contains predicate on the "error_message" field. func ErrorMessageContains(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldContains(FieldErrorMessage, v)) } // ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. func ErrorMessageHasPrefix(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldHasPrefix(FieldErrorMessage, v)) } // ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. func ErrorMessageHasSuffix(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldHasSuffix(FieldErrorMessage, v)) } // ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. func ErrorMessageEqualFold(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldEqualFold(FieldErrorMessage, v)) } // ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. 
func ErrorMessageContainsFold(v string) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldErrorMessage), v)) - }) + return predicate.AgentTask(sql.FieldContainsFold(FieldErrorMessage, v)) } // HasAgentTaskToProvisioningStep applies the HasEdge predicate on the "AgentTaskToProvisioningStep" edge. @@ -630,7 +354,6 @@ func HasAgentTaskToProvisioningStep() predicate.AgentTask { return predicate.AgentTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentTaskToProvisioningStepTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AgentTaskToProvisioningStepTable, AgentTaskToProvisioningStepColumn), ) sqlgraph.HasNeighbors(s, step) @@ -640,11 +363,7 @@ func HasAgentTaskToProvisioningStep() predicate.AgentTask { // HasAgentTaskToProvisioningStepWith applies the HasEdge predicate on the "AgentTaskToProvisioningStep" edge with a given conditions (other predicates). func HasAgentTaskToProvisioningStepWith(preds ...predicate.ProvisioningStep) predicate.AgentTask { return predicate.AgentTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentTaskToProvisioningStepInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AgentTaskToProvisioningStepTable, AgentTaskToProvisioningStepColumn), - ) + step := newAgentTaskToProvisioningStepStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -658,7 +377,6 @@ func HasAgentTaskToProvisionedHost() predicate.AgentTask { return predicate.AgentTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentTaskToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, AgentTaskToProvisionedHostTable, AgentTaskToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -668,11 +386,7 @@ func HasAgentTaskToProvisionedHost() predicate.AgentTask { // HasAgentTaskToProvisionedHostWith applies the HasEdge predicate on the "AgentTaskToProvisionedHost" edge with a given conditions (other predicates). func HasAgentTaskToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.AgentTask { return predicate.AgentTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentTaskToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, AgentTaskToProvisionedHostTable, AgentTaskToProvisionedHostColumn), - ) + step := newAgentTaskToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -686,7 +400,6 @@ func HasAgentTaskToAdhocPlan() predicate.AgentTask { return predicate.AgentTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentTaskToAdhocPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, AgentTaskToAdhocPlanTable, AgentTaskToAdhocPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -696,11 +409,7 @@ func HasAgentTaskToAdhocPlan() predicate.AgentTask { // HasAgentTaskToAdhocPlanWith applies the HasEdge predicate on the "AgentTaskToAdhocPlan" edge with a given conditions (other predicates). 
func HasAgentTaskToAdhocPlanWith(preds ...predicate.AdhocPlan) predicate.AgentTask { return predicate.AgentTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AgentTaskToAdhocPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, AgentTaskToAdhocPlanTable, AgentTaskToAdhocPlanColumn), - ) + step := newAgentTaskToAdhocPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -711,32 +420,15 @@ func HasAgentTaskToAdhocPlanWith(preds ...predicate.AdhocPlan) predicate.AgentTa // And groups predicates with the AND operator between them. func And(predicates ...predicate.AgentTask) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AgentTask(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.AgentTask) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AgentTask(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.AgentTask) predicate.AgentTask { - return predicate.AgentTask(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.AgentTask(sql.NotPredicates(p)) } diff --git a/ent/agenttask_create.go b/ent/agenttask_create.go index b1cb70f8..32a66423 100755 --- a/ent/agenttask_create.go +++ b/ent/agenttask_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -141,44 +141,8 @@ func (atc *AgentTaskCreate) Mutation() *AgentTaskMutation { // Save creates the AgentTask in the database. func (atc *AgentTaskCreate) Save(ctx context.Context) (*AgentTask, error) { - var ( - err error - node *AgentTask - ) atc.defaults() - if len(atc.hooks) == 0 { - if err = atc.check(); err != nil { - return nil, err - } - node, err = atc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = atc.check(); err != nil { - return nil, err - } - atc.mutation = mutation - if node, err = atc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(atc.hooks) - 1; i >= 0; i-- { - if atc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = atc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, atc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks) } // SaveX calls Save and panics if Save returns an error. 
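[editor's note] The predicate rewrite above is mechanical: the hand-rolled *sql.Selector closures are replaced by the sql.Field* helpers from newer ent releases, and the empty-slice guard for In/NotIn now lives inside sql.FieldIn/sql.FieldNotIn. Caller-side code is unaffected. A minimal sketch of how these predicates are still consumed (the enum value and filter strings are hypothetical, not taken from this repository; assumed imports are "context", "github.com/gen0cide/laforge/ent", and "github.com/gen0cide/laforge/ent/agenttask"):

func failedAgentTasks(ctx context.Context, client *ent.Client) ([]*ent.AgentTask, error) {
	// Combine field predicates exactly as before; only their internal
	// implementation changed in this regeneration.
	return client.AgentTask.Query().
		Where(
			agenttask.StateEQ("FAILED"), // hypothetical enum value
			agenttask.Or(
				agenttask.OutputContains("timeout"),
				agenttask.ErrorMessageNotIn("", "none"),
			),
		).
		All(ctx)
}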
@@ -256,10 +220,13 @@ func (atc *AgentTaskCreate) check() error { } func (atc *AgentTaskCreate) sqlSave(ctx context.Context) (*AgentTask, error) { + if err := atc.check(); err != nil { + return nil, err + } _node, _spec := atc.createSpec() if err := sqlgraph.CreateNode(ctx, atc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -270,70 +237,42 @@ func (atc *AgentTaskCreate) sqlSave(ctx context.Context) (*AgentTask, error) { return nil, err } } + atc.mutation.id = &_node.ID + atc.mutation.done = true return _node, nil } func (atc *AgentTaskCreate) createSpec() (*AgentTask, *sqlgraph.CreateSpec) { var ( _node = &AgentTask{config: atc.config} - _spec = &sqlgraph.CreateSpec{ - Table: agenttask.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(agenttask.Table, sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID)) ) if id, ok := atc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := atc.mutation.Command(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: agenttask.FieldCommand, - }) + _spec.SetField(agenttask.FieldCommand, field.TypeEnum, value) _node.Command = value } if value, ok := atc.mutation.Args(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldArgs, - }) + _spec.SetField(agenttask.FieldArgs, field.TypeString, value) _node.Args = value } if value, ok := atc.mutation.Number(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: agenttask.FieldNumber, - }) + _spec.SetField(agenttask.FieldNumber, field.TypeInt, value) _node.Number = value } if value, ok := atc.mutation.Output(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldOutput, - }) + _spec.SetField(agenttask.FieldOutput, field.TypeString, value) _node.Output = value } if value, ok := atc.mutation.State(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: agenttask.FieldState, - }) + _spec.SetField(agenttask.FieldState, field.TypeEnum, value) _node.State = value } if value, ok := atc.mutation.ErrorMessage(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldErrorMessage, - }) + _spec.SetField(agenttask.FieldErrorMessage, field.TypeString, value) _node.ErrorMessage = value } if nodes := atc.mutation.AgentTaskToProvisioningStepIDs(); len(nodes) > 0 { @@ -344,10 +283,7 @@ func (atc *AgentTaskCreate) createSpec() (*AgentTask, *sqlgraph.CreateSpec) { Columns: []string{agenttask.AgentTaskToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -364,10 +300,7 @@ func (atc *AgentTaskCreate) createSpec() (*AgentTask, *sqlgraph.CreateSpec) { Columns: []string{agenttask.AgentTaskToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -384,10 +317,7 @@ func (atc *AgentTaskCreate) createSpec() (*AgentTask, *sqlgraph.CreateSpec) { Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -401,11 +331,15 @@ func (atc *AgentTaskCreate) createSpec() (*AgentTask, *sqlgraph.CreateSpec) { // AgentTaskCreateBulk is the builder for creating many AgentTask entities in bulk. type AgentTaskCreateBulk struct { config + err error builders []*AgentTaskCreate } // Save creates the AgentTask entities in the database. func (atcb *AgentTaskCreateBulk) Save(ctx context.Context) ([]*AgentTask, error) { + if atcb.err != nil { + return nil, atcb.err + } specs := make([]*sqlgraph.CreateSpec, len(atcb.builders)) nodes := make([]*AgentTask, len(atcb.builders)) mutators := make([]Mutator, len(atcb.builders)) @@ -422,8 +356,8 @@ func (atcb *AgentTaskCreateBulk) Save(ctx context.Context) ([]*AgentTask, error) return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, atcb.builders[i+1].mutation) } else { @@ -431,7 +365,7 @@ func (atcb *AgentTaskCreateBulk) Save(ctx context.Context) ([]*AgentTask, error) // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, atcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/agenttask_delete.go b/ent/agenttask_delete.go index 5707409a..4b553f49 100755 --- a/ent/agenttask_delete.go +++ b/ent/agenttask_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (atd *AgentTaskDelete) Where(ps ...predicate.AgentTask) *AgentTaskDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (atd *AgentTaskDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(atd.hooks) == 0 { - affected, err = atd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - atd.mutation = mutation - affected, err = atd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(atd.hooks) - 1; i >= 0; i-- { - if atd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = atd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, atd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, atd.sqlExec, atd.mutation, atd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
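[editor's note] The create/delete builders above now funnel Save/Exec through the generic withHooks helper, and their specs are built with sqlgraph.NewCreateSpec / SetField / NewFieldSpec instead of hand-assembled FieldSpec structs; registered hooks still run, and the public builder API is unchanged. A sketch of the unchanged caller-side usage, assuming the setters shown in createSpec above (which required fields, defaults, and edges the schema actually enforces is not visible here, and the enum values are hypothetical):

func createAgentTask(ctx context.Context, client *ent.Client) (*ent.AgentTask, error) {
	// Values are illustrative only; required fields and valid enum values
	// come from the AgentTask schema, not from this sketch.
	return client.AgentTask.Create().
		SetCommand("EXECUTE"). // hypothetical enum value
		SetArgs("/bin/true").
		SetNumber(1).
		SetOutput("").
		SetState("AWAITING"). // hypothetical enum value
		SetErrorMessage("").
		Save(ctx)
}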
@@ -68,15 +40,7 @@ func (atd *AgentTaskDelete) ExecX(ctx context.Context) int { } func (atd *AgentTaskDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: agenttask.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(agenttask.Table, sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID)) if ps := atd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (atd *AgentTaskDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, atd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, atd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + atd.mutation.done = true + return affected, err } // AgentTaskDeleteOne is the builder for deleting a single AgentTask entity. @@ -92,6 +61,12 @@ type AgentTaskDeleteOne struct { atd *AgentTaskDelete } +// Where appends a list predicates to the AgentTaskDelete builder. +func (atdo *AgentTaskDeleteOne) Where(ps ...predicate.AgentTask) *AgentTaskDeleteOne { + atdo.atd.mutation.Where(ps...) + return atdo +} + // Exec executes the deletion query. func (atdo *AgentTaskDeleteOne) Exec(ctx context.Context) error { n, err := atdo.atd.Exec(ctx) @@ -107,5 +82,7 @@ func (atdo *AgentTaskDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (atdo *AgentTaskDeleteOne) ExecX(ctx context.Context) { - atdo.atd.ExecX(ctx) + if err := atdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/agenttask_query.go b/ent/agenttask_query.go index 099a0b9e..c46cca7a 100755 --- a/ent/agenttask_query.go +++ b/ent/agenttask_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -23,17 +22,17 @@ import ( // AgentTaskQuery is the builder for querying AgentTask entities. type AgentTaskQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.AgentTask - // eager-loading edges. + ctx *QueryContext + order []agenttask.OrderOption + inters []Interceptor + predicates []predicate.AgentTask withAgentTaskToProvisioningStep *ProvisioningStepQuery withAgentTaskToProvisionedHost *ProvisionedHostQuery withAgentTaskToAdhocPlan *AdhocPlanQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*AgentTask) error + withNamedAgentTaskToAdhocPlan map[string]*AdhocPlanQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -45,34 +44,34 @@ func (atq *AgentTaskQuery) Where(ps ...predicate.AgentTask) *AgentTaskQuery { return atq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (atq *AgentTaskQuery) Limit(limit int) *AgentTaskQuery { - atq.limit = &limit + atq.ctx.Limit = &limit return atq } -// Offset adds an offset step to the query. +// Offset to start from. func (atq *AgentTaskQuery) Offset(offset int) *AgentTaskQuery { - atq.offset = &offset + atq.ctx.Offset = &offset return atq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
func (atq *AgentTaskQuery) Unique(unique bool) *AgentTaskQuery { - atq.unique = &unique + atq.ctx.Unique = &unique return atq } -// Order adds an order step to the query. -func (atq *AgentTaskQuery) Order(o ...OrderFunc) *AgentTaskQuery { +// Order specifies how the records should be ordered. +func (atq *AgentTaskQuery) Order(o ...agenttask.OrderOption) *AgentTaskQuery { atq.order = append(atq.order, o...) return atq } // QueryAgentTaskToProvisioningStep chains the current query on the "AgentTaskToProvisioningStep" edge. func (atq *AgentTaskQuery) QueryAgentTaskToProvisioningStep() *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: atq.config} + query := (&ProvisioningStepClient{config: atq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := atq.prepareQuery(ctx); err != nil { return nil, err @@ -94,7 +93,7 @@ func (atq *AgentTaskQuery) QueryAgentTaskToProvisioningStep() *ProvisioningStepQ // QueryAgentTaskToProvisionedHost chains the current query on the "AgentTaskToProvisionedHost" edge. func (atq *AgentTaskQuery) QueryAgentTaskToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: atq.config} + query := (&ProvisionedHostClient{config: atq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := atq.prepareQuery(ctx); err != nil { return nil, err @@ -116,7 +115,7 @@ func (atq *AgentTaskQuery) QueryAgentTaskToProvisionedHost() *ProvisionedHostQue // QueryAgentTaskToAdhocPlan chains the current query on the "AgentTaskToAdhocPlan" edge. func (atq *AgentTaskQuery) QueryAgentTaskToAdhocPlan() *AdhocPlanQuery { - query := &AdhocPlanQuery{config: atq.config} + query := (&AdhocPlanClient{config: atq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := atq.prepareQuery(ctx); err != nil { return nil, err @@ -139,7 +138,7 @@ func (atq *AgentTaskQuery) QueryAgentTaskToAdhocPlan() *AdhocPlanQuery { // First returns the first AgentTask entity from the query. // Returns a *NotFoundError when no AgentTask was found. func (atq *AgentTaskQuery) First(ctx context.Context) (*AgentTask, error) { - nodes, err := atq.Limit(1).All(ctx) + nodes, err := atq.Limit(1).All(setContextOp(ctx, atq.ctx, "First")) if err != nil { return nil, err } @@ -162,7 +161,7 @@ func (atq *AgentTaskQuery) FirstX(ctx context.Context) *AgentTask { // Returns a *NotFoundError when no AgentTask ID was found. func (atq *AgentTaskQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = atq.Limit(1).IDs(ctx); err != nil { + if ids, err = atq.Limit(1).IDs(setContextOp(ctx, atq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -185,7 +184,7 @@ func (atq *AgentTaskQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one AgentTask entity is found. // Returns a *NotFoundError when no AgentTask entities are found. func (atq *AgentTaskQuery) Only(ctx context.Context) (*AgentTask, error) { - nodes, err := atq.Limit(2).All(ctx) + nodes, err := atq.Limit(2).All(setContextOp(ctx, atq.ctx, "Only")) if err != nil { return nil, err } @@ -213,7 +212,7 @@ func (atq *AgentTaskQuery) OnlyX(ctx context.Context) *AgentTask { // Returns a *NotFoundError when no entities are found. 
func (atq *AgentTaskQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = atq.Limit(2).IDs(ctx); err != nil { + if ids, err = atq.Limit(2).IDs(setContextOp(ctx, atq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -238,10 +237,12 @@ func (atq *AgentTaskQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of AgentTasks. func (atq *AgentTaskQuery) All(ctx context.Context) ([]*AgentTask, error) { + ctx = setContextOp(ctx, atq.ctx, "All") if err := atq.prepareQuery(ctx); err != nil { return nil, err } - return atq.sqlAll(ctx) + qr := querierAll[[]*AgentTask, *AgentTaskQuery]() + return withInterceptors[[]*AgentTask](ctx, atq, qr, atq.inters) } // AllX is like All, but panics if an error occurs. @@ -254,9 +255,12 @@ func (atq *AgentTaskQuery) AllX(ctx context.Context) []*AgentTask { } // IDs executes the query and returns a list of AgentTask IDs. -func (atq *AgentTaskQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := atq.Select(agenttask.FieldID).Scan(ctx, &ids); err != nil { +func (atq *AgentTaskQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if atq.ctx.Unique == nil && atq.path != nil { + atq.Unique(true) + } + ctx = setContextOp(ctx, atq.ctx, "IDs") + if err = atq.Select(agenttask.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -273,10 +277,11 @@ func (atq *AgentTaskQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (atq *AgentTaskQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, atq.ctx, "Count") if err := atq.prepareQuery(ctx); err != nil { return 0, err } - return atq.sqlCount(ctx) + return withInterceptors[int](ctx, atq, querierCount[*AgentTaskQuery](), atq.inters) } // CountX is like Count, but panics if an error occurs. @@ -290,10 +295,15 @@ func (atq *AgentTaskQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (atq *AgentTaskQuery) Exist(ctx context.Context) (bool, error) { - if err := atq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, atq.ctx, "Exist") + switch _, err := atq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return atq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -313,24 +323,23 @@ func (atq *AgentTaskQuery) Clone() *AgentTaskQuery { } return &AgentTaskQuery{ config: atq.config, - limit: atq.limit, - offset: atq.offset, - order: append([]OrderFunc{}, atq.order...), + ctx: atq.ctx.Clone(), + order: append([]agenttask.OrderOption{}, atq.order...), + inters: append([]Interceptor{}, atq.inters...), predicates: append([]predicate.AgentTask{}, atq.predicates...), withAgentTaskToProvisioningStep: atq.withAgentTaskToProvisioningStep.Clone(), withAgentTaskToProvisionedHost: atq.withAgentTaskToProvisionedHost.Clone(), withAgentTaskToAdhocPlan: atq.withAgentTaskToAdhocPlan.Clone(), // clone intermediate query. - sql: atq.sql.Clone(), - path: atq.path, - unique: atq.unique, + sql: atq.sql.Clone(), + path: atq.path, } } // WithAgentTaskToProvisioningStep tells the query-builder to eager-load the nodes that are connected to // the "AgentTaskToProvisioningStep" edge. The optional arguments are used to configure the query builder of the edge. 
func (atq *AgentTaskQuery) WithAgentTaskToProvisioningStep(opts ...func(*ProvisioningStepQuery)) *AgentTaskQuery { - query := &ProvisioningStepQuery{config: atq.config} + query := (&ProvisioningStepClient{config: atq.config}).Query() for _, opt := range opts { opt(query) } @@ -341,7 +350,7 @@ func (atq *AgentTaskQuery) WithAgentTaskToProvisioningStep(opts ...func(*Provisi // WithAgentTaskToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "AgentTaskToProvisionedHost" edge. The optional arguments are used to configure the query builder of the edge. func (atq *AgentTaskQuery) WithAgentTaskToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *AgentTaskQuery { - query := &ProvisionedHostQuery{config: atq.config} + query := (&ProvisionedHostClient{config: atq.config}).Query() for _, opt := range opts { opt(query) } @@ -352,7 +361,7 @@ func (atq *AgentTaskQuery) WithAgentTaskToProvisionedHost(opts ...func(*Provisio // WithAgentTaskToAdhocPlan tells the query-builder to eager-load the nodes that are connected to // the "AgentTaskToAdhocPlan" edge. The optional arguments are used to configure the query builder of the edge. func (atq *AgentTaskQuery) WithAgentTaskToAdhocPlan(opts ...func(*AdhocPlanQuery)) *AgentTaskQuery { - query := &AdhocPlanQuery{config: atq.config} + query := (&AdhocPlanClient{config: atq.config}).Query() for _, opt := range opts { opt(query) } @@ -374,17 +383,13 @@ func (atq *AgentTaskQuery) WithAgentTaskToAdhocPlan(opts ...func(*AdhocPlanQuery // GroupBy(agenttask.FieldCommand). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (atq *AgentTaskQuery) GroupBy(field string, fields ...string) *AgentTaskGroupBy { - group := &AgentTaskGroupBy{config: atq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := atq.prepareQuery(ctx); err != nil { - return nil, err - } - return atq.sqlQuery(ctx), nil - } - return group + atq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AgentTaskGroupBy{build: atq} + grbuild.flds = &atq.ctx.Fields + grbuild.label = agenttask.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -399,14 +404,31 @@ func (atq *AgentTaskQuery) GroupBy(field string, fields ...string) *AgentTaskGro // client.AgentTask.Query(). // Select(agenttask.FieldCommand). // Scan(ctx, &v) -// func (atq *AgentTaskQuery) Select(fields ...string) *AgentTaskSelect { - atq.fields = append(atq.fields, fields...) - return &AgentTaskSelect{AgentTaskQuery: atq} + atq.ctx.Fields = append(atq.ctx.Fields, fields...) + sbuild := &AgentTaskSelect{AgentTaskQuery: atq} + sbuild.label = agenttask.Label + sbuild.flds, sbuild.scan = &atq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AgentTaskSelect configured with the given aggregations. +func (atq *AgentTaskQuery) Aggregate(fns ...AggregateFunc) *AgentTaskSelect { + return atq.Select().Aggregate(fns...) 
} func (atq *AgentTaskQuery) prepareQuery(ctx context.Context) error { - for _, f := range atq.fields { + for _, inter := range atq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, atq); err != nil { + return err + } + } + } + for _, f := range atq.ctx.Fields { if !agenttask.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -421,7 +443,7 @@ func (atq *AgentTaskQuery) prepareQuery(ctx context.Context) error { return nil } -func (atq *AgentTaskQuery) sqlAll(ctx context.Context) ([]*AgentTask, error) { +func (atq *AgentTaskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AgentTask, error) { var ( nodes = []*AgentTask{} withFKs = atq.withFKs @@ -438,150 +460,180 @@ func (atq *AgentTaskQuery) sqlAll(ctx context.Context) ([]*AgentTask, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, agenttask.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AgentTask).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &AgentTask{config: atq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(atq.modifiers) > 0 { + _spec.Modifiers = atq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, atq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := atq.withAgentTaskToProvisioningStep; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AgentTask) - for i := range nodes { - if nodes[i].agent_task_agent_task_to_provisioning_step == nil { - continue - } - fk := *nodes[i].agent_task_agent_task_to_provisioning_step - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := atq.loadAgentTaskToProvisioningStep(ctx, query, nodes, nil, + func(n *AgentTask, e *ProvisioningStep) { n.Edges.AgentTaskToProvisioningStep = e }); err != nil { + return nil, err } - query.Where(provisioningstep.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := atq.withAgentTaskToProvisionedHost; query != nil { + if err := atq.loadAgentTaskToProvisionedHost(ctx, query, nodes, nil, + func(n *AgentTask, e *ProvisionedHost) { n.Edges.AgentTaskToProvisionedHost = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_task_agent_task_to_provisioning_step" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AgentTaskToProvisioningStep = n - } + } + if query := atq.withAgentTaskToAdhocPlan; query != nil { + if err := atq.loadAgentTaskToAdhocPlan(ctx, query, nodes, + func(n *AgentTask) { n.Edges.AgentTaskToAdhocPlan = []*AdhocPlan{} }, + func(n *AgentTask, e *AdhocPlan) { + n.Edges.AgentTaskToAdhocPlan = append(n.Edges.AgentTaskToAdhocPlan, e) + }); err != nil { + return nil, err + } + } + for name, query := range 
atq.withNamedAgentTaskToAdhocPlan { + if err := atq.loadAgentTaskToAdhocPlan(ctx, query, nodes, + func(n *AgentTask) { n.appendNamedAgentTaskToAdhocPlan(name) }, + func(n *AgentTask, e *AdhocPlan) { n.appendNamedAgentTaskToAdhocPlan(name, e) }); err != nil { + return nil, err } } + for i := range atq.loadTotal { + if err := atq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := atq.withAgentTaskToProvisionedHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*AgentTask) +func (atq *AgentTaskQuery) loadAgentTaskToProvisioningStep(ctx context.Context, query *ProvisioningStepQuery, nodes []*AgentTask, init func(*AgentTask), assign func(*AgentTask, *ProvisioningStep)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AgentTask) + for i := range nodes { + if nodes[i].agent_task_agent_task_to_provisioning_step == nil { + continue + } + fk := *nodes[i].agent_task_agent_task_to_provisioning_step + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(provisioningstep.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "agent_task_agent_task_to_provisioning_step" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].agent_task_agent_task_to_provisioned_host == nil { - continue - } - fk := *nodes[i].agent_task_agent_task_to_provisioned_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(provisionedhost.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (atq *AgentTaskQuery) loadAgentTaskToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*AgentTask, init func(*AgentTask), assign func(*AgentTask, *ProvisionedHost)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*AgentTask) + for i := range nodes { + if nodes[i].agent_task_agent_task_to_provisioned_host == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_task_agent_task_to_provisioned_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AgentTaskToProvisionedHost = n - } + fk := *nodes[i].agent_task_agent_task_to_provisioned_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := atq.withAgentTaskToAdhocPlan; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*AgentTask) + if len(ids) == 0 { + return nil + } + query.Where(provisionedhost.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "agent_task_agent_task_to_provisioned_host" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.AgentTaskToAdhocPlan = []*AdhocPlan{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.InValues(agenttask.AgentTaskToAdhocPlanColumn, fks...)) - })) - 
neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (atq *AgentTaskQuery) loadAgentTaskToAdhocPlan(ctx context.Context, query *AdhocPlanQuery, nodes []*AgentTask, init func(*AgentTask), assign func(*AgentTask, *AdhocPlan)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*AgentTask) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - for _, n := range neighbors { - fk := n.adhoc_plan_adhoc_plan_to_agent_task - if fk == nil { - return nil, fmt.Errorf(`foreign-key "adhoc_plan_adhoc_plan_to_agent_task" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_agent_task" returned %v for node %v`, *fk, n.ID) - } - node.Edges.AgentTaskToAdhocPlan = append(node.Edges.AgentTaskToAdhocPlan, n) + } + query.withFKs = true + query.Where(predicate.AdhocPlan(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(agenttask.AgentTaskToAdhocPlanColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.adhoc_plan_adhoc_plan_to_agent_task + if fk == nil { + return fmt.Errorf(`foreign-key "adhoc_plan_adhoc_plan_to_agent_task" is nil for node %v`, n.ID) } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "adhoc_plan_adhoc_plan_to_agent_task" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - - return nodes, nil + return nil } func (atq *AgentTaskQuery) sqlCount(ctx context.Context) (int, error) { _spec := atq.querySpec() - _spec.Node.Columns = atq.fields - if len(atq.fields) > 0 { - _spec.Unique = atq.unique != nil && *atq.unique + if len(atq.modifiers) > 0 { + _spec.Modifiers = atq.modifiers } - return sqlgraph.CountNodes(ctx, atq.driver, _spec) -} - -func (atq *AgentTaskQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := atq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = atq.ctx.Fields + if len(atq.ctx.Fields) > 0 { + _spec.Unique = atq.ctx.Unique != nil && *atq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, atq.driver, _spec) } func (atq *AgentTaskQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: agenttask.Table, - Columns: agenttask.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, - }, - From: atq.sql, - Unique: true, - } - if unique := atq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(agenttask.Table, agenttask.Columns, sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID)) + _spec.From = atq.sql + if unique := atq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if atq.path != nil { + _spec.Unique = true } - if fields := atq.fields; len(fields) > 0 { + if fields := atq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, agenttask.FieldID) for i := range fields { @@ -597,10 +649,10 @@ func (atq *AgentTaskQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := atq.limit; limit != nil { + if limit := atq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := atq.offset; offset != nil { + if offset := atq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := atq.order; len(ps) > 0 { @@ -616,7 +668,7 @@ func (atq 
*AgentTaskQuery) querySpec() *sqlgraph.QuerySpec { func (atq *AgentTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(atq.driver.Dialect()) t1 := builder.Table(agenttask.Table) - columns := atq.fields + columns := atq.ctx.Fields if len(columns) == 0 { columns = agenttask.Columns } @@ -625,7 +677,7 @@ func (atq *AgentTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = atq.sql selector.Select(selector.Columns(columns...)...) } - if atq.unique != nil && *atq.unique { + if atq.ctx.Unique != nil && *atq.ctx.Unique { selector.Distinct() } for _, p := range atq.predicates { @@ -634,25 +686,35 @@ func (atq *AgentTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range atq.order { p(selector) } - if offset := atq.offset; offset != nil { + if offset := atq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := atq.limit; limit != nil { + if limit := atq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedAgentTaskToAdhocPlan tells the query-builder to eager-load the nodes that are connected to the "AgentTaskToAdhocPlan" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (atq *AgentTaskQuery) WithNamedAgentTaskToAdhocPlan(name string, opts ...func(*AdhocPlanQuery)) *AgentTaskQuery { + query := (&AdhocPlanClient{config: atq.config}).Query() + for _, opt := range opts { + opt(query) + } + if atq.withNamedAgentTaskToAdhocPlan == nil { + atq.withNamedAgentTaskToAdhocPlan = make(map[string]*AdhocPlanQuery) + } + atq.withNamedAgentTaskToAdhocPlan[name] = query + return atq +} + // AgentTaskGroupBy is the group-by builder for AgentTask entities. type AgentTaskGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *AgentTaskQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -661,471 +723,77 @@ func (atgb *AgentTaskGroupBy) Aggregate(fns ...AggregateFunc) *AgentTaskGroupBy return atgb } -// Scan applies the group-by query and scans the result into the given value. -func (atgb *AgentTaskGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := atgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (atgb *AgentTaskGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, atgb.build.ctx, "GroupBy") + if err := atgb.build.prepareQuery(ctx); err != nil { return err } - atgb.sql = query - return atgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := atgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(atgb.fields) > 1 { - return nil, errors.New("ent: AgentTaskGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := atgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (atgb *AgentTaskGroupBy) StringsX(ctx context.Context) []string { - v, err := atgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = atgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) StringX(ctx context.Context) string { - v, err := atgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(atgb.fields) > 1 { - return nil, errors.New("ent: AgentTaskGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := atgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) IntsX(ctx context.Context) []int { - v, err := atgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = atgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) IntX(ctx context.Context) int { - v, err := atgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(atgb.fields) > 1 { - return nil, errors.New("ent: AgentTaskGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := atgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := atgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = atgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
-func (atgb *AgentTaskGroupBy) Float64X(ctx context.Context) float64 { - v, err := atgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(atgb.fields) > 1 { - return nil, errors.New("ent: AgentTaskGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := atgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) BoolsX(ctx context.Context) []bool { - v, err := atgb.Bools(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*AgentTaskQuery, *AgentTaskGroupBy](ctx, atgb.build, atgb, atgb.build.inters, v) } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (atgb *AgentTaskGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = atgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (atgb *AgentTaskGroupBy) BoolX(ctx context.Context) bool { - v, err := atgb.Bool(ctx) - if err != nil { - panic(err) +func (atgb *AgentTaskGroupBy) sqlScan(ctx context.Context, root *AgentTaskQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(atgb.fns)) + for _, fn := range atgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (atgb *AgentTaskGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range atgb.fields { - if !agenttask.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*atgb.flds)+len(atgb.fns)) + for _, f := range *atgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := atgb.sqlQuery() + selector.GroupBy(selector.Columns(*atgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := atgb.driver.Query(ctx, query, args, rows); err != nil { + if err := atgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (atgb *AgentTaskGroupBy) sqlQuery() *sql.Selector { - selector := atgb.sql.Select() - aggregation := make([]string, 0, len(atgb.fns)) - for _, fn := range atgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(atgb.fields)+len(atgb.fns)) - for _, f := range atgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(atgb.fields...)...) 
-} - // AgentTaskSelect is the builder for selecting fields of AgentTask entities. type AgentTaskSelect struct { *AgentTaskQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ats *AgentTaskSelect) Aggregate(fns ...AggregateFunc) *AgentTaskSelect { + ats.fns = append(ats.fns, fns...) + return ats } // Scan applies the selector query and scans the result into the given value. -func (ats *AgentTaskSelect) Scan(ctx context.Context, v interface{}) error { +func (ats *AgentTaskSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ats.ctx, "Select") if err := ats.prepareQuery(ctx); err != nil { return err } - ats.sql = ats.AgentTaskQuery.sqlQuery(ctx) - return ats.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ats *AgentTaskSelect) ScanX(ctx context.Context, v interface{}) { - if err := ats.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Strings(ctx context.Context) ([]string, error) { - if len(ats.fields) > 1 { - return nil, errors.New("ent: AgentTaskSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ats.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ats *AgentTaskSelect) StringsX(ctx context.Context) []string { - v, err := ats.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ats.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ats *AgentTaskSelect) StringX(ctx context.Context) string { - v, err := ats.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Ints(ctx context.Context) ([]int, error) { - if len(ats.fields) > 1 { - return nil, errors.New("ent: AgentTaskSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ats.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ats *AgentTaskSelect) IntsX(ctx context.Context) []int { - v, err := ats.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ats.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskSelect.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*AgentTaskQuery, *AgentTaskSelect](ctx, ats.AgentTaskQuery, ats, ats.inters, v) } -// IntX is like Int, but panics if an error occurs. 
-func (ats *AgentTaskSelect) IntX(ctx context.Context) int { - v, err := ats.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ats.fields) > 1 { - return nil, errors.New("ent: AgentTaskSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ats.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ats *AgentTaskSelect) Float64sX(ctx context.Context) []float64 { - v, err := ats.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ats.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ats *AgentTaskSelect) Float64X(ctx context.Context) float64 { - v, err := ats.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ats.fields) > 1 { - return nil, errors.New("ent: AgentTaskSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ats.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ats *AgentTaskSelect) BoolsX(ctx context.Context) []bool { - v, err := ats.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ats *AgentTaskSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ats.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{agenttask.Label} - default: - err = fmt.Errorf("ent: AgentTaskSelect.Bools returned %d results when one was expected", len(v)) +func (ats *AgentTaskSelect) sqlScan(ctx context.Context, root *AgentTaskQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ats.fns)) + for _, fn := range ats.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ats *AgentTaskSelect) BoolX(ctx context.Context) bool { - v, err := ats.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ats.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
} - return v -} - -func (ats *AgentTaskSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ats.sql.Query() + query, args := selector.Query() if err := ats.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/agenttask_update.go b/ent/agenttask_update.go index e361820c..17d2b89a 100755 --- a/ent/agenttask_update.go +++ b/ent/agenttask_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -37,12 +37,28 @@ func (atu *AgentTaskUpdate) SetCommand(a agenttask.Command) *AgentTaskUpdate { return atu } +// SetNillableCommand sets the "command" field if the given value is not nil. +func (atu *AgentTaskUpdate) SetNillableCommand(a *agenttask.Command) *AgentTaskUpdate { + if a != nil { + atu.SetCommand(*a) + } + return atu +} + // SetArgs sets the "args" field. func (atu *AgentTaskUpdate) SetArgs(s string) *AgentTaskUpdate { atu.mutation.SetArgs(s) return atu } +// SetNillableArgs sets the "args" field if the given value is not nil. +func (atu *AgentTaskUpdate) SetNillableArgs(s *string) *AgentTaskUpdate { + if s != nil { + atu.SetArgs(*s) + } + return atu +} + // SetNumber sets the "number" field. func (atu *AgentTaskUpdate) SetNumber(i int) *AgentTaskUpdate { atu.mutation.ResetNumber() @@ -50,6 +66,14 @@ func (atu *AgentTaskUpdate) SetNumber(i int) *AgentTaskUpdate { return atu } +// SetNillableNumber sets the "number" field if the given value is not nil. +func (atu *AgentTaskUpdate) SetNillableNumber(i *int) *AgentTaskUpdate { + if i != nil { + atu.SetNumber(*i) + } + return atu +} + // AddNumber adds i to the "number" field. func (atu *AgentTaskUpdate) AddNumber(i int) *AgentTaskUpdate { atu.mutation.AddNumber(i) @@ -76,6 +100,14 @@ func (atu *AgentTaskUpdate) SetState(a agenttask.State) *AgentTaskUpdate { return atu } +// SetNillableState sets the "state" field if the given value is not nil. +func (atu *AgentTaskUpdate) SetNillableState(a *agenttask.State) *AgentTaskUpdate { + if a != nil { + atu.SetState(*a) + } + return atu +} + // SetErrorMessage sets the "error_message" field. func (atu *AgentTaskUpdate) SetErrorMessage(s string) *AgentTaskUpdate { atu.mutation.SetErrorMessage(s) @@ -175,40 +207,7 @@ func (atu *AgentTaskUpdate) RemoveAgentTaskToAdhocPlan(a ...*AdhocPlan) *AgentTa // Save executes the query and returns the number of nodes affected by the update operation. func (atu *AgentTaskUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(atu.hooks) == 0 { - if err = atu.check(); err != nil { - return 0, err - } - affected, err = atu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = atu.check(); err != nil { - return 0, err - } - atu.mutation = mutation - affected, err = atu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(atu.hooks) - 1; i >= 0; i-- { - if atu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = atu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, atu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, atu.sqlSave, atu.mutation, atu.hooks) } // SaveX is like Save, but panics if an error occurs. 
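[editor's note] The update builders gain SetNillable* setters in this regeneration, which makes partial updates simpler: a nil pointer means "leave the field unchanged", so optional values can be passed straight through without if-guards. A minimal sketch under that assumption (the patch struct is hypothetical; assumed imports are "context", "github.com/google/uuid", "github.com/gen0cide/laforge/ent", and "github.com/gen0cide/laforge/ent/agenttask"):

type taskPatch struct {
	Command *agenttask.Command // nil: do not change "command"
	Args    *string            // nil: do not change "args"
}

func patchAgentTask(ctx context.Context, client *ent.Client, id uuid.UUID, p taskPatch) (*ent.AgentTask, error) {
	// Only non-nil fields of the patch are written to the row.
	return client.AgentTask.UpdateOneID(id).
		SetNillableCommand(p.Command).
		SetNillableArgs(p.Args).
		Save(ctx)
}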
@@ -252,16 +251,10 @@ func (atu *AgentTaskUpdate) check() error { } func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: agenttask.Table, - Columns: agenttask.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, - }, + if err := atu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(agenttask.Table, agenttask.Columns, sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID)) if ps := atu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -270,53 +263,25 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := atu.mutation.Command(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: agenttask.FieldCommand, - }) + _spec.SetField(agenttask.FieldCommand, field.TypeEnum, value) } if value, ok := atu.mutation.Args(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldArgs, - }) + _spec.SetField(agenttask.FieldArgs, field.TypeString, value) } if value, ok := atu.mutation.Number(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: agenttask.FieldNumber, - }) + _spec.SetField(agenttask.FieldNumber, field.TypeInt, value) } if value, ok := atu.mutation.AddedNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: agenttask.FieldNumber, - }) + _spec.AddField(agenttask.FieldNumber, field.TypeInt, value) } if value, ok := atu.mutation.Output(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldOutput, - }) + _spec.SetField(agenttask.FieldOutput, field.TypeString, value) } if value, ok := atu.mutation.State(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: agenttask.FieldState, - }) + _spec.SetField(agenttask.FieldState, field.TypeEnum, value) } if value, ok := atu.mutation.ErrorMessage(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldErrorMessage, - }) + _spec.SetField(agenttask.FieldErrorMessage, field.TypeString, value) } if atu.mutation.AgentTaskToProvisioningStepCleared() { edge := &sqlgraph.EdgeSpec{ @@ -326,10 +291,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -342,10 +304,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -361,10 +320,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx 
context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -377,10 +333,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -396,10 +349,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -412,10 +362,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -431,10 +378,7 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -446,10 +390,11 @@ func (atu *AgentTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{agenttask.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + atu.mutation.done = true return n, nil } @@ -467,12 +412,28 @@ func (atuo *AgentTaskUpdateOne) SetCommand(a agenttask.Command) *AgentTaskUpdate return atuo } +// SetNillableCommand sets the "command" field if the given value is not nil. +func (atuo *AgentTaskUpdateOne) SetNillableCommand(a *agenttask.Command) *AgentTaskUpdateOne { + if a != nil { + atuo.SetCommand(*a) + } + return atuo +} + // SetArgs sets the "args" field. func (atuo *AgentTaskUpdateOne) SetArgs(s string) *AgentTaskUpdateOne { atuo.mutation.SetArgs(s) return atuo } +// SetNillableArgs sets the "args" field if the given value is not nil. +func (atuo *AgentTaskUpdateOne) SetNillableArgs(s *string) *AgentTaskUpdateOne { + if s != nil { + atuo.SetArgs(*s) + } + return atuo +} + // SetNumber sets the "number" field. func (atuo *AgentTaskUpdateOne) SetNumber(i int) *AgentTaskUpdateOne { atuo.mutation.ResetNumber() @@ -480,6 +441,14 @@ func (atuo *AgentTaskUpdateOne) SetNumber(i int) *AgentTaskUpdateOne { return atuo } +// SetNillableNumber sets the "number" field if the given value is not nil. 
+func (atuo *AgentTaskUpdateOne) SetNillableNumber(i *int) *AgentTaskUpdateOne { + if i != nil { + atuo.SetNumber(*i) + } + return atuo +} + // AddNumber adds i to the "number" field. func (atuo *AgentTaskUpdateOne) AddNumber(i int) *AgentTaskUpdateOne { atuo.mutation.AddNumber(i) @@ -506,6 +475,14 @@ func (atuo *AgentTaskUpdateOne) SetState(a agenttask.State) *AgentTaskUpdateOne return atuo } +// SetNillableState sets the "state" field if the given value is not nil. +func (atuo *AgentTaskUpdateOne) SetNillableState(a *agenttask.State) *AgentTaskUpdateOne { + if a != nil { + atuo.SetState(*a) + } + return atuo +} + // SetErrorMessage sets the "error_message" field. func (atuo *AgentTaskUpdateOne) SetErrorMessage(s string) *AgentTaskUpdateOne { atuo.mutation.SetErrorMessage(s) @@ -603,6 +580,12 @@ func (atuo *AgentTaskUpdateOne) RemoveAgentTaskToAdhocPlan(a ...*AdhocPlan) *Age return atuo.RemoveAgentTaskToAdhocPlanIDs(ids...) } +// Where appends a list predicates to the AgentTaskUpdate builder. +func (atuo *AgentTaskUpdateOne) Where(ps ...predicate.AgentTask) *AgentTaskUpdateOne { + atuo.mutation.Where(ps...) + return atuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (atuo *AgentTaskUpdateOne) Select(field string, fields ...string) *AgentTaskUpdateOne { @@ -612,40 +595,7 @@ func (atuo *AgentTaskUpdateOne) Select(field string, fields ...string) *AgentTas // Save executes the query and returns the updated AgentTask entity. func (atuo *AgentTaskUpdateOne) Save(ctx context.Context) (*AgentTask, error) { - var ( - err error - node *AgentTask - ) - if len(atuo.hooks) == 0 { - if err = atuo.check(); err != nil { - return nil, err - } - node, err = atuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AgentTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = atuo.check(); err != nil { - return nil, err - } - atuo.mutation = mutation - node, err = atuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(atuo.hooks) - 1; i >= 0; i-- { - if atuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = atuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, atuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, atuo.sqlSave, atuo.mutation, atuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
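Both AgentTaskUpdate and AgentTaskUpdateOne gain SetNillable* setters in these hunks: they apply the field only when the pointer is non-nil, which turns partial updates (for example GraphQL inputs where omitted fields arrive as nil pointers) into one chained call instead of a series of if-checks. AgentTaskUpdateOne also picks up a Where method for appending extra predicates to a single-row update. A minimal sketch of the setter pattern, assuming a hypothetical patch struct plus an existing client and id:

package main

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// agentTaskPatch is a hypothetical partial-update payload: a nil pointer
// means "leave the field unchanged".
type agentTaskPatch struct {
	Args   *string
	Number *int
}

func applyAgentTaskPatch(ctx context.Context, client *ent.Client, id uuid.UUID, p agentTaskPatch) error {
	_, err := client.AgentTask.UpdateOneID(id).
		SetNillableArgs(p.Args).     // no-op when p.Args is nil
		SetNillableNumber(p.Number). // no-op when p.Number is nil
		Save(ctx)
	return err
}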
@@ -689,16 +639,10 @@ func (atuo *AgentTaskUpdateOne) check() error { } func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: agenttask.Table, - Columns: agenttask.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, - }, + if err := atuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(agenttask.Table, agenttask.Columns, sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID)) id, ok := atuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AgentTask.id" for update`)} @@ -724,53 +668,25 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, } } if value, ok := atuo.mutation.Command(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: agenttask.FieldCommand, - }) + _spec.SetField(agenttask.FieldCommand, field.TypeEnum, value) } if value, ok := atuo.mutation.Args(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldArgs, - }) + _spec.SetField(agenttask.FieldArgs, field.TypeString, value) } if value, ok := atuo.mutation.Number(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: agenttask.FieldNumber, - }) + _spec.SetField(agenttask.FieldNumber, field.TypeInt, value) } if value, ok := atuo.mutation.AddedNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: agenttask.FieldNumber, - }) + _spec.AddField(agenttask.FieldNumber, field.TypeInt, value) } if value, ok := atuo.mutation.Output(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldOutput, - }) + _spec.SetField(agenttask.FieldOutput, field.TypeString, value) } if value, ok := atuo.mutation.State(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: agenttask.FieldState, - }) + _spec.SetField(agenttask.FieldState, field.TypeEnum, value) } if value, ok := atuo.mutation.ErrorMessage(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: agenttask.FieldErrorMessage, - }) + _spec.SetField(agenttask.FieldErrorMessage, field.TypeString, value) } if atuo.mutation.AgentTaskToProvisioningStepCleared() { edge := &sqlgraph.EdgeSpec{ @@ -780,10 +696,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -796,10 +709,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ 
-815,10 +725,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -831,10 +738,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -850,10 +754,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -866,10 +767,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -885,10 +783,7 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, Columns: []string{agenttask.AgentTaskToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -903,9 +798,10 @@ func (atuo *AgentTaskUpdateOne) sqlSave(ctx context.Context) (_node *AgentTask, if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{agenttask.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + atuo.mutation.done = true return _node, nil } diff --git a/ent/ansible.go b/ent/ansible.go index d2272bd0..d53b2973 100755 --- a/ent/ansible.go +++ b/ent/ansible.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/ansible" "github.com/gen0cide/laforge/ent/environment" @@ -20,8 +21,8 @@ type Ansible struct { ID uuid.UUID `json:"id,omitempty"` // Name holds the value of the "name" field. Name string `json:"name,omitempty" hcl:"name,attr"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Description holds the value of the "description" field. Description string `json:"description,omitempty" hcl:"description,optional"` // Source holds the value of the "source" field. @@ -40,13 +41,15 @@ type Ansible struct { // The values are being populated by the AnsibleQuery when eager-loading is set. 
Edges AnsibleEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // AnsibleToUser holds the value of the AnsibleToUser edge. HCLAnsibleToUser []*User `json:"AnsibleToUser,omitempty" hcl:"maintainer,block"` // AnsibleFromEnvironment holds the value of the AnsibleFromEnvironment edge. HCLAnsibleFromEnvironment *Environment `json:"AnsibleFromEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_ansible *uuid.UUID + selectValues sql.SelectValues } // AnsibleEdges holds the relations/edges for other nodes in the graph. @@ -58,6 +61,10 @@ type AnsibleEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int + + namedAnsibleToUser map[string][]*User } // AnsibleToUserOrErr returns the AnsibleToUser value or an error if the edge @@ -74,8 +81,7 @@ func (e AnsibleEdges) AnsibleToUserOrErr() ([]*User, error) { func (e AnsibleEdges) AnsibleFromEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[1] { if e.AnsibleFromEnvironment == nil { - // The edge AnsibleFromEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.AnsibleFromEnvironment, nil @@ -84,20 +90,20 @@ func (e AnsibleEdges) AnsibleFromEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Ansible) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Ansible) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case ansible.FieldTags: values[i] = new([]byte) - case ansible.FieldName, ansible.FieldHclID, ansible.FieldDescription, ansible.FieldSource, ansible.FieldPlaybookName, ansible.FieldMethod, ansible.FieldInventory, ansible.FieldAbsPath: + case ansible.FieldName, ansible.FieldHCLID, ansible.FieldDescription, ansible.FieldSource, ansible.FieldPlaybookName, ansible.FieldMethod, ansible.FieldInventory, ansible.FieldAbsPath: values[i] = new(sql.NullString) case ansible.FieldID: values[i] = new(uuid.UUID) case ansible.ForeignKeys[0]: // environment_environment_to_ansible values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Ansible", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -105,7 +111,7 @@ func (*Ansible) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Ansible fields. 
-func (a *Ansible) assignValues(columns []string, values []interface{}) error { +func (a *Ansible) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -123,11 +129,11 @@ func (a *Ansible) assignValues(columns []string, values []interface{}) error { } else if value.Valid { a.Name = value.String } - case ansible.FieldHclID: + case ansible.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - a.HclID = value.String + a.HCLID = value.String } case ansible.FieldDescription: if value, ok := values[i].(*sql.NullString); !ok { @@ -180,36 +186,44 @@ func (a *Ansible) assignValues(columns []string, values []interface{}) error { a.environment_environment_to_ansible = new(uuid.UUID) *a.environment_environment_to_ansible = *value.S.(*uuid.UUID) } + default: + a.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Ansible. +// This includes values selected through modifiers, order, etc. +func (a *Ansible) Value(name string) (ent.Value, error) { + return a.selectValues.Get(name) +} + // QueryAnsibleToUser queries the "AnsibleToUser" edge of the Ansible entity. func (a *Ansible) QueryAnsibleToUser() *UserQuery { - return (&AnsibleClient{config: a.config}).QueryAnsibleToUser(a) + return NewAnsibleClient(a.config).QueryAnsibleToUser(a) } // QueryAnsibleFromEnvironment queries the "AnsibleFromEnvironment" edge of the Ansible entity. func (a *Ansible) QueryAnsibleFromEnvironment() *EnvironmentQuery { - return (&AnsibleClient{config: a.config}).QueryAnsibleFromEnvironment(a) + return NewAnsibleClient(a.config).QueryAnsibleFromEnvironment(a) } // Update returns a builder for updating this Ansible. // Note that you need to call Ansible.Unwrap() before calling this method if this Ansible // was returned from a transaction, and the transaction was committed or rolled back. func (a *Ansible) Update() *AnsibleUpdateOne { - return (&AnsibleClient{config: a.config}).UpdateOne(a) + return NewAnsibleClient(a.config).UpdateOne(a) } // Unwrap unwraps the Ansible entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (a *Ansible) Unwrap() *Ansible { - tx, ok := a.config.driver.(*txDriver) + _tx, ok := a.config.driver.(*txDriver) if !ok { panic("ent: Ansible is not a transactional entity") } - a.config.driver = tx.drv + a.config.driver = _tx.drv return a } @@ -217,34 +231,60 @@ func (a *Ansible) Unwrap() *Ansible { func (a *Ansible) String() string { var builder strings.Builder builder.WriteString("Ansible(") - builder.WriteString(fmt.Sprintf("id=%v", a.ID)) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) + builder.WriteString("name=") builder.WriteString(a.Name) - builder.WriteString(", hcl_id=") - builder.WriteString(a.HclID) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("hcl_id=") + builder.WriteString(a.HCLID) + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(a.Description) - builder.WriteString(", source=") + builder.WriteString(", ") + builder.WriteString("source=") builder.WriteString(a.Source) - builder.WriteString(", playbook_name=") + builder.WriteString(", ") + builder.WriteString("playbook_name=") builder.WriteString(a.PlaybookName) - builder.WriteString(", method=") + builder.WriteString(", ") + builder.WriteString("method=") builder.WriteString(fmt.Sprintf("%v", a.Method)) - builder.WriteString(", inventory=") + builder.WriteString(", ") + builder.WriteString("inventory=") builder.WriteString(a.Inventory) - builder.WriteString(", abs_path=") + builder.WriteString(", ") + builder.WriteString("abs_path=") builder.WriteString(a.AbsPath) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", a.Tags)) builder.WriteByte(')') return builder.String() } -// Ansibles is a parsable slice of Ansible. -type Ansibles []*Ansible +// NamedAnsibleToUser returns the AnsibleToUser named value or an error if the edge was not +// loaded in eager-loading with this name. +func (a *Ansible) NamedAnsibleToUser(name string) ([]*User, error) { + if a.Edges.namedAnsibleToUser == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := a.Edges.namedAnsibleToUser[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (a Ansibles) config(cfg config) { - for _i := range a { - a[_i].config = cfg +func (a *Ansible) appendNamedAnsibleToUser(name string, edges ...*User) { + if a.Edges.namedAnsibleToUser == nil { + a.Edges.namedAnsibleToUser = make(map[string][]*User) + } + if len(edges) == 0 { + a.Edges.namedAnsibleToUser[name] = []*User{} + } else { + a.Edges.namedAnsibleToUser[name] = append(a.Edges.namedAnsibleToUser[name], edges...) } } + +// Ansibles is a parsable slice of Ansible. +type Ansibles []*Ansible diff --git a/ent/ansible/ansible.go b/ent/ansible/ansible.go index 6b7b530d..c01590e3 100755 --- a/ent/ansible/ansible.go +++ b/ent/ansible/ansible.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ansible @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -17,8 +19,8 @@ const ( FieldID = "id" // FieldName holds the string denoting the name field in the database. FieldName = "name" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. 
+ FieldHCLID = "hcl_id" // FieldDescription holds the string denoting the description field in the database. FieldDescription = "description" // FieldSource holds the string denoting the source field in the database. @@ -59,7 +61,7 @@ const ( var Columns = []string{ FieldID, FieldName, - FieldHclID, + FieldHCLID, FieldDescription, FieldSource, FieldPlaybookName, @@ -117,19 +119,102 @@ func MethodValidator(m Method) error { } } +// OrderOption defines the ordering options for the Ansible queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// BySource orders the results by the source field. +func BySource(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSource, opts...).ToFunc() +} + +// ByPlaybookName orders the results by the playbook_name field. +func ByPlaybookName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlaybookName, opts...).ToFunc() +} + +// ByMethod orders the results by the method field. +func ByMethod(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMethod, opts...).ToFunc() +} + +// ByInventory orders the results by the inventory field. +func ByInventory(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldInventory, opts...).ToFunc() +} + +// ByAbsPath orders the results by the abs_path field. +func ByAbsPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAbsPath, opts...).ToFunc() +} + +// ByAnsibleToUserCount orders the results by AnsibleToUser count. +func ByAnsibleToUserCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAnsibleToUserStep(), opts...) + } +} + +// ByAnsibleToUser orders the results by AnsibleToUser terms. +func ByAnsibleToUser(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAnsibleToUserStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAnsibleFromEnvironmentField orders the results by AnsibleFromEnvironment field. 
+func ByAnsibleFromEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAnsibleFromEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newAnsibleToUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AnsibleToUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AnsibleToUserTable, AnsibleToUserColumn), + ) +} +func newAnsibleFromEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AnsibleFromEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AnsibleFromEnvironmentTable, AnsibleFromEnvironmentColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (m Method) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(m.String())) +func (e Method) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (m *Method) UnmarshalGQL(val interface{}) error { +func (e *Method) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *m = Method(str) - if err := MethodValidator(*m); err != nil { + *e = Method(str) + if err := MethodValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Method", str) } return nil diff --git a/ent/ansible/where.go b/ent/ansible/where.go index 03f3a227..f75e9989 100755 --- a/ent/ansible/where.go +++ b/ent/ansible/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ansible @@ -11,959 +11,557 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Ansible(sql.FieldLTE(FieldID, id)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldName, v)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldEQ(FieldHCLID, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. func Description(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldDescription, v)) } // Source applies equality check predicate on the "source" field. It's identical to SourceEQ. func Source(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldSource, v)) } // PlaybookName applies equality check predicate on the "playbook_name" field. It's identical to PlaybookNameEQ. func PlaybookName(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldPlaybookName, v)) } // Inventory applies equality check predicate on the "inventory" field. It's identical to InventoryEQ. func Inventory(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldInventory, v)) } // AbsPath applies equality check predicate on the "abs_path" field. It's identical to AbsPathEQ. func AbsPath(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldAbsPath, v)) } // NameEQ applies the EQ predicate on the "name" field. 
func NameEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. 
func NameEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Ansible(sql.FieldContainsFold(FieldName, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Ansible { + return predicate.Ansible(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Ansible { + return predicate.Ansible(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. 
+func HCLIDLT(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Ansible { + return predicate.Ansible(sql.FieldContainsFold(FieldHCLID, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. func DescriptionEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. func DescriptionIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. func DescriptionNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. func DescriptionHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. func DescriptionEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. func DescriptionContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Ansible(sql.FieldContainsFold(FieldDescription, v)) } // SourceEQ applies the EQ predicate on the "source" field. 
func SourceEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldSource, v)) } // SourceNEQ applies the NEQ predicate on the "source" field. func SourceNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldSource, v)) } // SourceIn applies the In predicate on the "source" field. func SourceIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSource), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldSource, vs...)) } // SourceNotIn applies the NotIn predicate on the "source" field. func SourceNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSource), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldSource, vs...)) } // SourceGT applies the GT predicate on the "source" field. func SourceGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldGT(FieldSource, v)) } // SourceGTE applies the GTE predicate on the "source" field. func SourceGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldGTE(FieldSource, v)) } // SourceLT applies the LT predicate on the "source" field. func SourceLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldLT(FieldSource, v)) } // SourceLTE applies the LTE predicate on the "source" field. func SourceLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldLTE(FieldSource, v)) } // SourceContains applies the Contains predicate on the "source" field. func SourceContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldContains(FieldSource, v)) } // SourceHasPrefix applies the HasPrefix predicate on the "source" field. func SourceHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldHasPrefix(FieldSource, v)) } // SourceHasSuffix applies the HasSuffix predicate on the "source" field. func SourceHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldHasSuffix(FieldSource, v)) } // SourceEqualFold applies the EqualFold predicate on the "source" field. 
func SourceEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldEqualFold(FieldSource, v)) } // SourceContainsFold applies the ContainsFold predicate on the "source" field. func SourceContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSource), v)) - }) + return predicate.Ansible(sql.FieldContainsFold(FieldSource, v)) } // PlaybookNameEQ applies the EQ predicate on the "playbook_name" field. func PlaybookNameEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldPlaybookName, v)) } // PlaybookNameNEQ applies the NEQ predicate on the "playbook_name" field. func PlaybookNameNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldPlaybookName, v)) } // PlaybookNameIn applies the In predicate on the "playbook_name" field. func PlaybookNameIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPlaybookName), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldPlaybookName, vs...)) } // PlaybookNameNotIn applies the NotIn predicate on the "playbook_name" field. func PlaybookNameNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPlaybookName), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldPlaybookName, vs...)) } // PlaybookNameGT applies the GT predicate on the "playbook_name" field. func PlaybookNameGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldGT(FieldPlaybookName, v)) } // PlaybookNameGTE applies the GTE predicate on the "playbook_name" field. func PlaybookNameGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldGTE(FieldPlaybookName, v)) } // PlaybookNameLT applies the LT predicate on the "playbook_name" field. func PlaybookNameLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldLT(FieldPlaybookName, v)) } // PlaybookNameLTE applies the LTE predicate on the "playbook_name" field. func PlaybookNameLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldLTE(FieldPlaybookName, v)) } // PlaybookNameContains applies the Contains predicate on the "playbook_name" field. 
func PlaybookNameContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldContains(FieldPlaybookName, v)) } // PlaybookNameHasPrefix applies the HasPrefix predicate on the "playbook_name" field. func PlaybookNameHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldHasPrefix(FieldPlaybookName, v)) } // PlaybookNameHasSuffix applies the HasSuffix predicate on the "playbook_name" field. func PlaybookNameHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldHasSuffix(FieldPlaybookName, v)) } // PlaybookNameEqualFold applies the EqualFold predicate on the "playbook_name" field. func PlaybookNameEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldEqualFold(FieldPlaybookName, v)) } // PlaybookNameContainsFold applies the ContainsFold predicate on the "playbook_name" field. func PlaybookNameContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPlaybookName), v)) - }) + return predicate.Ansible(sql.FieldContainsFold(FieldPlaybookName, v)) } // MethodEQ applies the EQ predicate on the "method" field. func MethodEQ(v Method) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMethod), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldMethod, v)) } // MethodNEQ applies the NEQ predicate on the "method" field. func MethodNEQ(v Method) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMethod), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldMethod, v)) } // MethodIn applies the In predicate on the "method" field. func MethodIn(vs ...Method) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldMethod), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldMethod, vs...)) } // MethodNotIn applies the NotIn predicate on the "method" field. func MethodNotIn(vs ...Method) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldMethod), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldMethod, vs...)) } // InventoryEQ applies the EQ predicate on the "inventory" field. func InventoryEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldInventory, v)) } // InventoryNEQ applies the NEQ predicate on the "inventory" field. 
func InventoryNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldInventory, v)) } // InventoryIn applies the In predicate on the "inventory" field. func InventoryIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldInventory), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldInventory, vs...)) } // InventoryNotIn applies the NotIn predicate on the "inventory" field. func InventoryNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldInventory), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldInventory, vs...)) } // InventoryGT applies the GT predicate on the "inventory" field. func InventoryGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldGT(FieldInventory, v)) } // InventoryGTE applies the GTE predicate on the "inventory" field. func InventoryGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldGTE(FieldInventory, v)) } // InventoryLT applies the LT predicate on the "inventory" field. func InventoryLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldLT(FieldInventory, v)) } // InventoryLTE applies the LTE predicate on the "inventory" field. func InventoryLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldLTE(FieldInventory, v)) } // InventoryContains applies the Contains predicate on the "inventory" field. func InventoryContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldContains(FieldInventory, v)) } // InventoryHasPrefix applies the HasPrefix predicate on the "inventory" field. func InventoryHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldHasPrefix(FieldInventory, v)) } // InventoryHasSuffix applies the HasSuffix predicate on the "inventory" field. func InventoryHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldHasSuffix(FieldInventory, v)) } // InventoryEqualFold applies the EqualFold predicate on the "inventory" field. 
func InventoryEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldEqualFold(FieldInventory, v)) } // InventoryContainsFold applies the ContainsFold predicate on the "inventory" field. func InventoryContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldInventory), v)) - }) + return predicate.Ansible(sql.FieldContainsFold(FieldInventory, v)) } // AbsPathEQ applies the EQ predicate on the "abs_path" field. func AbsPathEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldEQ(FieldAbsPath, v)) } // AbsPathNEQ applies the NEQ predicate on the "abs_path" field. func AbsPathNEQ(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldNEQ(FieldAbsPath, v)) } // AbsPathIn applies the In predicate on the "abs_path" field. func AbsPathIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldAbsPath), v...)) - }) + return predicate.Ansible(sql.FieldIn(FieldAbsPath, vs...)) } // AbsPathNotIn applies the NotIn predicate on the "abs_path" field. func AbsPathNotIn(vs ...string) predicate.Ansible { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Ansible(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldAbsPath), v...)) - }) + return predicate.Ansible(sql.FieldNotIn(FieldAbsPath, vs...)) } // AbsPathGT applies the GT predicate on the "abs_path" field. func AbsPathGT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldGT(FieldAbsPath, v)) } // AbsPathGTE applies the GTE predicate on the "abs_path" field. func AbsPathGTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldGTE(FieldAbsPath, v)) } // AbsPathLT applies the LT predicate on the "abs_path" field. func AbsPathLT(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldLT(FieldAbsPath, v)) } // AbsPathLTE applies the LTE predicate on the "abs_path" field. func AbsPathLTE(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldLTE(FieldAbsPath, v)) } // AbsPathContains applies the Contains predicate on the "abs_path" field. 
func AbsPathContains(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldContains(FieldAbsPath, v)) } // AbsPathHasPrefix applies the HasPrefix predicate on the "abs_path" field. func AbsPathHasPrefix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldHasPrefix(FieldAbsPath, v)) } // AbsPathHasSuffix applies the HasSuffix predicate on the "abs_path" field. func AbsPathHasSuffix(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldHasSuffix(FieldAbsPath, v)) } // AbsPathEqualFold applies the EqualFold predicate on the "abs_path" field. func AbsPathEqualFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldEqualFold(FieldAbsPath, v)) } // AbsPathContainsFold applies the ContainsFold predicate on the "abs_path" field. func AbsPathContainsFold(v string) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAbsPath), v)) - }) + return predicate.Ansible(sql.FieldContainsFold(FieldAbsPath, v)) } // HasAnsibleToUser applies the HasEdge predicate on the "AnsibleToUser" edge. @@ -971,7 +569,6 @@ func HasAnsibleToUser() predicate.Ansible { return predicate.Ansible(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AnsibleToUserTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, AnsibleToUserTable, AnsibleToUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -981,11 +578,7 @@ func HasAnsibleToUser() predicate.Ansible { // HasAnsibleToUserWith applies the HasEdge predicate on the "AnsibleToUser" edge with a given conditions (other predicates). func HasAnsibleToUserWith(preds ...predicate.User) predicate.Ansible { return predicate.Ansible(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AnsibleToUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AnsibleToUserTable, AnsibleToUserColumn), - ) + step := newAnsibleToUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -999,7 +592,6 @@ func HasAnsibleFromEnvironment() predicate.Ansible { return predicate.Ansible(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AnsibleFromEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, AnsibleFromEnvironmentTable, AnsibleFromEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1009,11 +601,7 @@ func HasAnsibleFromEnvironment() predicate.Ansible { // HasAnsibleFromEnvironmentWith applies the HasEdge predicate on the "AnsibleFromEnvironment" edge with a given conditions (other predicates). 
func HasAnsibleFromEnvironmentWith(preds ...predicate.Environment) predicate.Ansible { return predicate.Ansible(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AnsibleFromEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, AnsibleFromEnvironmentTable, AnsibleFromEnvironmentColumn), - ) + step := newAnsibleFromEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1024,32 +612,15 @@ func HasAnsibleFromEnvironmentWith(preds ...predicate.Environment) predicate.Ans // And groups predicates with the AND operator between them. func And(predicates ...predicate.Ansible) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Ansible(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Ansible) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Ansible(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Ansible) predicate.Ansible { - return predicate.Ansible(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Ansible(sql.NotPredicates(p)) } diff --git a/ent/ansible_create.go b/ent/ansible_create.go index b34c49ab..9e893900 100755 --- a/ent/ansible_create.go +++ b/ent/ansible_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -28,9 +28,9 @@ func (ac *AnsibleCreate) SetName(s string) *AnsibleCreate { return ac } -// SetHclID sets the "hcl_id" field. -func (ac *AnsibleCreate) SetHclID(s string) *AnsibleCreate { - ac.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (ac *AnsibleCreate) SetHCLID(s string) *AnsibleCreate { + ac.mutation.SetHCLID(s) return ac } @@ -131,44 +131,8 @@ func (ac *AnsibleCreate) Mutation() *AnsibleMutation { // Save creates the Ansible in the database. func (ac *AnsibleCreate) Save(ctx context.Context) (*Ansible, error) { - var ( - err error - node *Ansible - ) ac.defaults() - if len(ac.hooks) == 0 { - if err = ac.check(); err != nil { - return nil, err - } - node, err = ac.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AnsibleMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ac.check(); err != nil { - return nil, err - } - ac.mutation = mutation - if node, err = ac.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ac.hooks) - 1; i >= 0; i-- { - if ac.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ac.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ac.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) } // SaveX calls Save and panics if Save returns an error. 
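For reference, a minimal usage sketch (not part of the patch) of how the regenerated API reads at a call site after this migration: the flattened predicates (sql.FieldEqualFold, sql.FieldContains, ...) and the eager-loading options are drop-in replacements for the old closure-based helpers, so callers should not need changes. The sketch assumes an initialized *ent.Client named client; the helper name findPlaybooks and the filter values are hypothetical.

package examples // hypothetical package for illustration only

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/ansible"
)

// findPlaybooks is a hypothetical helper showing how the regenerated,
// flattened predicates and eager-loading options read at a call site.
func findPlaybooks(ctx context.Context, client *ent.Client) ([]*ent.Ansible, error) {
	return client.Ansible.Query().
		Where(
			ansible.PlaybookNameEqualFold("site.yml"), // example value; delegates to sql.FieldEqualFold
			ansible.InventoryContains("teams"),        // example value; delegates to sql.FieldContains
		).
		WithAnsibleFromEnvironment(). // eager-load the M2O environment edge
		All(ctx)
}

The flattened predicates should behave identically to the previous closure form; only the generated plumbing changes, which is why the rest of this diff touches no call sites.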
@@ -206,7 +170,7 @@ func (ac *AnsibleCreate) check() error { if _, ok := ac.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Ansible.name"`)} } - if _, ok := ac.mutation.HclID(); !ok { + if _, ok := ac.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Ansible.hcl_id"`)} } if _, ok := ac.mutation.Description(); !ok { @@ -239,10 +203,13 @@ func (ac *AnsibleCreate) check() error { } func (ac *AnsibleCreate) sqlSave(ctx context.Context) (*Ansible, error) { + if err := ac.check(); err != nil { + return nil, err + } _node, _spec := ac.createSpec() if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -253,94 +220,54 @@ func (ac *AnsibleCreate) sqlSave(ctx context.Context) (*Ansible, error) { return nil, err } } + ac.mutation.id = &_node.ID + ac.mutation.done = true return _node, nil } func (ac *AnsibleCreate) createSpec() (*Ansible, *sqlgraph.CreateSpec) { var ( _node = &Ansible{config: ac.config} - _spec = &sqlgraph.CreateSpec{ - Table: ansible.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(ansible.Table, sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID)) ) if id, ok := ac.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := ac.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldName, - }) + _spec.SetField(ansible.FieldName, field.TypeString, value) _node.Name = value } - if value, ok := ac.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldHclID, - }) - _node.HclID = value + if value, ok := ac.mutation.HCLID(); ok { + _spec.SetField(ansible.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := ac.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldDescription, - }) + _spec.SetField(ansible.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := ac.mutation.Source(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldSource, - }) + _spec.SetField(ansible.FieldSource, field.TypeString, value) _node.Source = value } if value, ok := ac.mutation.PlaybookName(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldPlaybookName, - }) + _spec.SetField(ansible.FieldPlaybookName, field.TypeString, value) _node.PlaybookName = value } if value, ok := ac.mutation.Method(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: ansible.FieldMethod, - }) + _spec.SetField(ansible.FieldMethod, field.TypeEnum, value) _node.Method = value } if value, ok := ac.mutation.Inventory(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldInventory, - }) + _spec.SetField(ansible.FieldInventory, field.TypeString, value) _node.Inventory = value } if value, ok := ac.mutation.AbsPath(); ok { - _spec.Fields = 
append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldAbsPath, - }) + _spec.SetField(ansible.FieldAbsPath, field.TypeString, value) _node.AbsPath = value } if value, ok := ac.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: ansible.FieldTags, - }) + _spec.SetField(ansible.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := ac.mutation.AnsibleToUserIDs(); len(nodes) > 0 { @@ -351,10 +278,7 @@ func (ac *AnsibleCreate) createSpec() (*Ansible, *sqlgraph.CreateSpec) { Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -370,10 +294,7 @@ func (ac *AnsibleCreate) createSpec() (*Ansible, *sqlgraph.CreateSpec) { Columns: []string{ansible.AnsibleFromEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -388,11 +309,15 @@ func (ac *AnsibleCreate) createSpec() (*Ansible, *sqlgraph.CreateSpec) { // AnsibleCreateBulk is the builder for creating many Ansible entities in bulk. type AnsibleCreateBulk struct { config + err error builders []*AnsibleCreate } // Save creates the Ansible entities in the database. func (acb *AnsibleCreateBulk) Save(ctx context.Context) ([]*Ansible, error) { + if acb.err != nil { + return nil, acb.err + } specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) nodes := make([]*Ansible, len(acb.builders)) mutators := make([]Mutator, len(acb.builders)) @@ -409,8 +334,8 @@ func (acb *AnsibleCreateBulk) Save(ctx context.Context) ([]*Ansible, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) } else { @@ -418,7 +343,7 @@ func (acb *AnsibleCreateBulk) Save(ctx context.Context) ([]*Ansible, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/ansible_delete.go b/ent/ansible_delete.go index 1a25c959..95c1856e 100755 --- a/ent/ansible_delete.go +++ b/ent/ansible_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ad *AnsibleDelete) Where(ps ...predicate.Ansible) *AnsibleDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (ad *AnsibleDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ad.hooks) == 0 { - affected, err = ad.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AnsibleMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ad.mutation = mutation - affected, err = ad.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ad.hooks) - 1; i >= 0; i-- { - if ad.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ad.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ad.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (ad *AnsibleDelete) ExecX(ctx context.Context) int { } func (ad *AnsibleDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: ansible.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(ansible.Table, sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID)) if ps := ad.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (ad *AnsibleDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, ad.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, ad.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ad.mutation.done = true + return affected, err } // AnsibleDeleteOne is the builder for deleting a single Ansible entity. @@ -92,6 +61,12 @@ type AnsibleDeleteOne struct { ad *AnsibleDelete } +// Where appends a list predicates to the AnsibleDelete builder. +func (ado *AnsibleDeleteOne) Where(ps ...predicate.Ansible) *AnsibleDeleteOne { + ado.ad.mutation.Where(ps...) + return ado +} + // Exec executes the deletion query. func (ado *AnsibleDeleteOne) Exec(ctx context.Context) error { n, err := ado.ad.Exec(ctx) @@ -107,5 +82,7 @@ func (ado *AnsibleDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ado *AnsibleDeleteOne) ExecX(ctx context.Context) { - ado.ad.ExecX(ctx) + if err := ado.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/ansible_query.go b/ent/ansible_query.go index 9aa7e8ed..54fb4586 100755 --- a/ent/ansible_query.go +++ b/ent/ansible_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,16 +21,16 @@ import ( // AnsibleQuery is the builder for querying Ansible entities. type AnsibleQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Ansible - // eager-loading edges. + ctx *QueryContext + order []ansible.OrderOption + inters []Interceptor + predicates []predicate.Ansible withAnsibleToUser *UserQuery withAnsibleFromEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Ansible) error + withNamedAnsibleToUser map[string]*UserQuery // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -43,34 +42,34 @@ func (aq *AnsibleQuery) Where(ps ...predicate.Ansible) *AnsibleQuery { return aq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (aq *AnsibleQuery) Limit(limit int) *AnsibleQuery { - aq.limit = &limit + aq.ctx.Limit = &limit return aq } -// Offset adds an offset step to the query. +// Offset to start from. func (aq *AnsibleQuery) Offset(offset int) *AnsibleQuery { - aq.offset = &offset + aq.ctx.Offset = &offset return aq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (aq *AnsibleQuery) Unique(unique bool) *AnsibleQuery { - aq.unique = &unique + aq.ctx.Unique = &unique return aq } -// Order adds an order step to the query. -func (aq *AnsibleQuery) Order(o ...OrderFunc) *AnsibleQuery { +// Order specifies how the records should be ordered. +func (aq *AnsibleQuery) Order(o ...ansible.OrderOption) *AnsibleQuery { aq.order = append(aq.order, o...) return aq } // QueryAnsibleToUser chains the current query on the "AnsibleToUser" edge. func (aq *AnsibleQuery) QueryAnsibleToUser() *UserQuery { - query := &UserQuery{config: aq.config} + query := (&UserClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -92,7 +91,7 @@ func (aq *AnsibleQuery) QueryAnsibleToUser() *UserQuery { // QueryAnsibleFromEnvironment chains the current query on the "AnsibleFromEnvironment" edge. func (aq *AnsibleQuery) QueryAnsibleFromEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: aq.config} + query := (&EnvironmentClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +114,7 @@ func (aq *AnsibleQuery) QueryAnsibleFromEnvironment() *EnvironmentQuery { // First returns the first Ansible entity from the query. // Returns a *NotFoundError when no Ansible was found. func (aq *AnsibleQuery) First(ctx context.Context) (*Ansible, error) { - nodes, err := aq.Limit(1).All(ctx) + nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First")) if err != nil { return nil, err } @@ -138,7 +137,7 @@ func (aq *AnsibleQuery) FirstX(ctx context.Context) *Ansible { // Returns a *NotFoundError when no Ansible ID was found. func (aq *AnsibleQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = aq.Limit(1).IDs(ctx); err != nil { + if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -161,7 +160,7 @@ func (aq *AnsibleQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Ansible entity is found. // Returns a *NotFoundError when no Ansible entities are found. func (aq *AnsibleQuery) Only(ctx context.Context) (*Ansible, error) { - nodes, err := aq.Limit(2).All(ctx) + nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only")) if err != nil { return nil, err } @@ -189,7 +188,7 @@ func (aq *AnsibleQuery) OnlyX(ctx context.Context) *Ansible { // Returns a *NotFoundError when no entities are found. 
func (aq *AnsibleQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = aq.Limit(2).IDs(ctx); err != nil { + if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -214,10 +213,12 @@ func (aq *AnsibleQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Ansibles. func (aq *AnsibleQuery) All(ctx context.Context) ([]*Ansible, error) { + ctx = setContextOp(ctx, aq.ctx, "All") if err := aq.prepareQuery(ctx); err != nil { return nil, err } - return aq.sqlAll(ctx) + qr := querierAll[[]*Ansible, *AnsibleQuery]() + return withInterceptors[[]*Ansible](ctx, aq, qr, aq.inters) } // AllX is like All, but panics if an error occurs. @@ -230,9 +231,12 @@ func (aq *AnsibleQuery) AllX(ctx context.Context) []*Ansible { } // IDs executes the query and returns a list of Ansible IDs. -func (aq *AnsibleQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := aq.Select(ansible.FieldID).Scan(ctx, &ids); err != nil { +func (aq *AnsibleQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if aq.ctx.Unique == nil && aq.path != nil { + aq.Unique(true) + } + ctx = setContextOp(ctx, aq.ctx, "IDs") + if err = aq.Select(ansible.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -249,10 +253,11 @@ func (aq *AnsibleQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (aq *AnsibleQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, aq.ctx, "Count") if err := aq.prepareQuery(ctx); err != nil { return 0, err } - return aq.sqlCount(ctx) + return withInterceptors[int](ctx, aq, querierCount[*AnsibleQuery](), aq.inters) } // CountX is like Count, but panics if an error occurs. @@ -266,10 +271,15 @@ func (aq *AnsibleQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (aq *AnsibleQuery) Exist(ctx context.Context) (bool, error) { - if err := aq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, aq.ctx, "Exist") + switch _, err := aq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return aq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -289,23 +299,22 @@ func (aq *AnsibleQuery) Clone() *AnsibleQuery { } return &AnsibleQuery{ config: aq.config, - limit: aq.limit, - offset: aq.offset, - order: append([]OrderFunc{}, aq.order...), + ctx: aq.ctx.Clone(), + order: append([]ansible.OrderOption{}, aq.order...), + inters: append([]Interceptor{}, aq.inters...), predicates: append([]predicate.Ansible{}, aq.predicates...), withAnsibleToUser: aq.withAnsibleToUser.Clone(), withAnsibleFromEnvironment: aq.withAnsibleFromEnvironment.Clone(), // clone intermediate query. - sql: aq.sql.Clone(), - path: aq.path, - unique: aq.unique, + sql: aq.sql.Clone(), + path: aq.path, } } // WithAnsibleToUser tells the query-builder to eager-load the nodes that are connected to // the "AnsibleToUser" edge. The optional arguments are used to configure the query builder of the edge. 
func (aq *AnsibleQuery) WithAnsibleToUser(opts ...func(*UserQuery)) *AnsibleQuery { - query := &UserQuery{config: aq.config} + query := (&UserClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -316,7 +325,7 @@ func (aq *AnsibleQuery) WithAnsibleToUser(opts ...func(*UserQuery)) *AnsibleQuer // WithAnsibleFromEnvironment tells the query-builder to eager-load the nodes that are connected to // the "AnsibleFromEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AnsibleQuery) WithAnsibleFromEnvironment(opts ...func(*EnvironmentQuery)) *AnsibleQuery { - query := &EnvironmentQuery{config: aq.config} + query := (&EnvironmentClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -338,17 +347,13 @@ func (aq *AnsibleQuery) WithAnsibleFromEnvironment(opts ...func(*EnvironmentQuer // GroupBy(ansible.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (aq *AnsibleQuery) GroupBy(field string, fields ...string) *AnsibleGroupBy { - group := &AnsibleGroupBy{config: aq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := aq.prepareQuery(ctx); err != nil { - return nil, err - } - return aq.sqlQuery(ctx), nil - } - return group + aq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AnsibleGroupBy{build: aq} + grbuild.flds = &aq.ctx.Fields + grbuild.label = ansible.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -363,14 +368,31 @@ func (aq *AnsibleQuery) GroupBy(field string, fields ...string) *AnsibleGroupBy // client.Ansible.Query(). // Select(ansible.FieldName). // Scan(ctx, &v) -// func (aq *AnsibleQuery) Select(fields ...string) *AnsibleSelect { - aq.fields = append(aq.fields, fields...) - return &AnsibleSelect{AnsibleQuery: aq} + aq.ctx.Fields = append(aq.ctx.Fields, fields...) + sbuild := &AnsibleSelect{AnsibleQuery: aq} + sbuild.label = ansible.Label + sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AnsibleSelect configured with the given aggregations. +func (aq *AnsibleQuery) Aggregate(fns ...AggregateFunc) *AnsibleSelect { + return aq.Select().Aggregate(fns...) } func (aq *AnsibleQuery) prepareQuery(ctx context.Context) error { - for _, f := range aq.fields { + for _, inter := range aq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, aq); err != nil { + return err + } + } + } + for _, f := range aq.ctx.Fields { if !ansible.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -385,7 +407,7 @@ func (aq *AnsibleQuery) prepareQuery(ctx context.Context) error { return nil } -func (aq *AnsibleQuery) sqlAll(ctx context.Context) ([]*Ansible, error) { +func (aq *AnsibleQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Ansible, error) { var ( nodes = []*Ansible{} withFKs = aq.withFKs @@ -401,121 +423,140 @@ func (aq *AnsibleQuery) sqlAll(ctx context.Context) ([]*Ansible, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, ansible.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Ansible).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Ansible{config: aq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(aq.modifiers) > 0 { + _spec.Modifiers = aq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := aq.withAnsibleToUser; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Ansible) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.AnsibleToUser = []*User{} - } - query.withFKs = true - query.Where(predicate.User(func(s *sql.Selector) { - s.Where(sql.InValues(ansible.AnsibleToUserColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := aq.loadAnsibleToUser(ctx, query, nodes, + func(n *Ansible) { n.Edges.AnsibleToUser = []*User{} }, + func(n *Ansible, e *User) { n.Edges.AnsibleToUser = append(n.Edges.AnsibleToUser, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.ansible_ansible_to_user - if fk == nil { - return nil, fmt.Errorf(`foreign-key "ansible_ansible_to_user" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "ansible_ansible_to_user" returned %v for node %v`, *fk, n.ID) - } - node.Edges.AnsibleToUser = append(node.Edges.AnsibleToUser, n) - } } - if query := aq.withAnsibleFromEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Ansible) - for i := range nodes { - if nodes[i].environment_environment_to_ansible == nil { - continue - } - fk := *nodes[i].environment_environment_to_ansible - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := aq.loadAnsibleFromEnvironment(ctx, query, nodes, nil, + func(n *Ansible, e *Environment) { n.Edges.AnsibleFromEnvironment = e }); err != nil { + return nil, err } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range aq.withNamedAnsibleToUser { + if err := aq.loadAnsibleToUser(ctx, query, nodes, + func(n *Ansible) { n.appendNamedAnsibleToUser(name) }, + func(n *Ansible, e *User) { n.appendNamedAnsibleToUser(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_ansible" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.AnsibleFromEnvironment = n - } + } + for i := range aq.loadTotal { + if err := aq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (aq *AnsibleQuery) sqlCount(ctx context.Context) (int, error) { - _spec := aq.querySpec() - _spec.Node.Columns = aq.fields - if len(aq.fields) > 0 { - _spec.Unique = aq.unique != nil && *aq.unique +func (aq *AnsibleQuery) loadAnsibleToUser(ctx 
context.Context, query *UserQuery, nodes []*Ansible, init func(*Ansible), assign func(*Ansible, *User)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Ansible) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } } - return sqlgraph.CountNodes(ctx, aq.driver, _spec) + query.withFKs = true + query.Where(predicate.User(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(ansible.AnsibleToUserColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.ansible_ansible_to_user + if fk == nil { + return fmt.Errorf(`foreign-key "ansible_ansible_to_user" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "ansible_ansible_to_user" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -func (aq *AnsibleQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := aq.sqlCount(ctx) +func (aq *AnsibleQuery) loadAnsibleFromEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Ansible, init func(*Ansible), assign func(*Ansible, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Ansible) + for i := range nodes { + if nodes[i].environment_environment_to_ansible == nil { + continue + } + fk := *nodes[i].environment_environment_to_ansible + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_ansible" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - return n > 0, nil + return nil +} + +func (aq *AnsibleQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aq.querySpec() + if len(aq.modifiers) > 0 { + _spec.Modifiers = aq.modifiers + } + _spec.Node.Columns = aq.ctx.Fields + if len(aq.ctx.Fields) > 0 { + _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, aq.driver, _spec) } func (aq *AnsibleQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: ansible.Table, - Columns: ansible.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, - }, - From: aq.sql, - Unique: true, - } - if unique := aq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(ansible.Table, ansible.Columns, sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID)) + _spec.From = aq.sql + if unique := aq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if aq.path != nil { + _spec.Unique = true } - if fields := aq.fields; len(fields) > 0 { + if fields := aq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, ansible.FieldID) for i := range fields { @@ -531,10 +572,10 @@ func (aq *AnsibleQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := aq.limit; limit != nil { + if limit := aq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := aq.offset; offset != nil { + if offset := aq.ctx.Offset; offset != nil { _spec.Offset = 
*offset } if ps := aq.order; len(ps) > 0 { @@ -550,7 +591,7 @@ func (aq *AnsibleQuery) querySpec() *sqlgraph.QuerySpec { func (aq *AnsibleQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(aq.driver.Dialect()) t1 := builder.Table(ansible.Table) - columns := aq.fields + columns := aq.ctx.Fields if len(columns) == 0 { columns = ansible.Columns } @@ -559,7 +600,7 @@ func (aq *AnsibleQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = aq.sql selector.Select(selector.Columns(columns...)...) } - if aq.unique != nil && *aq.unique { + if aq.ctx.Unique != nil && *aq.ctx.Unique { selector.Distinct() } for _, p := range aq.predicates { @@ -568,25 +609,35 @@ func (aq *AnsibleQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range aq.order { p(selector) } - if offset := aq.offset; offset != nil { + if offset := aq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := aq.limit; limit != nil { + if limit := aq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedAnsibleToUser tells the query-builder to eager-load the nodes that are connected to the "AnsibleToUser" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (aq *AnsibleQuery) WithNamedAnsibleToUser(name string, opts ...func(*UserQuery)) *AnsibleQuery { + query := (&UserClient{config: aq.config}).Query() + for _, opt := range opts { + opt(query) + } + if aq.withNamedAnsibleToUser == nil { + aq.withNamedAnsibleToUser = make(map[string]*UserQuery) + } + aq.withNamedAnsibleToUser[name] = query + return aq +} + // AnsibleGroupBy is the group-by builder for Ansible entities. type AnsibleGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *AnsibleQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -595,471 +646,77 @@ func (agb *AnsibleGroupBy) Aggregate(fns ...AggregateFunc) *AnsibleGroupBy { return agb } -// Scan applies the group-by query and scans the result into the given value. -func (agb *AnsibleGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := agb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (agb *AnsibleGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, agb.build.ctx, "GroupBy") + if err := agb.build.prepareQuery(ctx); err != nil { return err } - agb.sql = query - return agb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (agb *AnsibleGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := agb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(agb.fields) > 1 { - return nil, errors.New("ent: AnsibleGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := agb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (agb *AnsibleGroupBy) StringsX(ctx context.Context) []string { - v, err := agb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = agb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (agb *AnsibleGroupBy) StringX(ctx context.Context) string { - v, err := agb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(agb.fields) > 1 { - return nil, errors.New("ent: AnsibleGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := agb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (agb *AnsibleGroupBy) IntsX(ctx context.Context) []int { - v, err := agb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = agb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (agb *AnsibleGroupBy) IntX(ctx context.Context) int { - v, err := agb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(agb.fields) > 1 { - return nil, errors.New("ent: AnsibleGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := agb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (agb *AnsibleGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := agb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = agb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
-func (agb *AnsibleGroupBy) Float64X(ctx context.Context) float64 { - v, err := agb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(agb.fields) > 1 { - return nil, errors.New("ent: AnsibleGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := agb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*AnsibleQuery, *AnsibleGroupBy](ctx, agb.build, agb, agb.build.inters, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (agb *AnsibleGroupBy) BoolsX(ctx context.Context) []bool { - v, err := agb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (agb *AnsibleGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = agb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (agb *AnsibleGroupBy) BoolX(ctx context.Context) bool { - v, err := agb.Bool(ctx) - if err != nil { - panic(err) +func (agb *AnsibleGroupBy) sqlScan(ctx context.Context, root *AnsibleQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (agb *AnsibleGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range agb.fields { - if !ansible.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*agb.flds)+len(agb.fns)) + for _, f := range *agb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := agb.sqlQuery() + selector.GroupBy(selector.Columns(*agb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := agb.driver.Query(ctx, query, args, rows); err != nil { + if err := agb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (agb *AnsibleGroupBy) sqlQuery() *sql.Selector { - selector := agb.sql.Select() - aggregation := make([]string, 0, len(agb.fns)) - for _, fn := range agb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(agb.fields)+len(agb.fns)) - for _, f := range agb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(agb.fields...)...) -} - // AnsibleSelect is the builder for selecting fields of Ansible entities. 
type AnsibleSelect struct { *AnsibleQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (as *AnsibleSelect) Aggregate(fns ...AggregateFunc) *AnsibleSelect { + as.fns = append(as.fns, fns...) + return as } // Scan applies the selector query and scans the result into the given value. -func (as *AnsibleSelect) Scan(ctx context.Context, v interface{}) error { +func (as *AnsibleSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, as.ctx, "Select") if err := as.prepareQuery(ctx); err != nil { return err } - as.sql = as.AnsibleQuery.sqlQuery(ctx) - return as.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (as *AnsibleSelect) ScanX(ctx context.Context, v interface{}) { - if err := as.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) Strings(ctx context.Context) ([]string, error) { - if len(as.fields) > 1 { - return nil, errors.New("ent: AnsibleSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := as.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (as *AnsibleSelect) StringsX(ctx context.Context) []string { - v, err := as.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = as.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (as *AnsibleSelect) StringX(ctx context.Context) string { - v, err := as.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) Ints(ctx context.Context) ([]int, error) { - if len(as.fields) > 1 { - return nil, errors.New("ent: AnsibleSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := as.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (as *AnsibleSelect) IntsX(ctx context.Context) []int { - v, err := as.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = as.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (as *AnsibleSelect) IntX(ctx context.Context) int { - v, err := as.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. 
-func (as *AnsibleSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(as.fields) > 1 { - return nil, errors.New("ent: AnsibleSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := as.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (as *AnsibleSelect) Float64sX(ctx context.Context) []float64 { - v, err := as.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = as.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (as *AnsibleSelect) Float64X(ctx context.Context) float64 { - v, err := as.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) Bools(ctx context.Context) ([]bool, error) { - if len(as.fields) > 1 { - return nil, errors.New("ent: AnsibleSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := as.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*AnsibleQuery, *AnsibleSelect](ctx, as.AnsibleQuery, as, as.inters, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (as *AnsibleSelect) BoolsX(ctx context.Context) []bool { - v, err := as.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (as *AnsibleSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = as.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ansible.Label} - default: - err = fmt.Errorf("ent: AnsibleSelect.Bools returned %d results when one was expected", len(v)) +func (as *AnsibleSelect) sqlScan(ctx context.Context, root *AnsibleQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(as.fns)) + for _, fn := range as.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (as *AnsibleSelect) BoolX(ctx context.Context) bool { - v, err := as.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*as.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (as *AnsibleSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := as.sql.Query() + query, args := selector.Query() if err := as.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/ansible_update.go b/ent/ansible_update.go index 6bd2f1b2..65404d00 100755 --- a/ent/ansible_update.go +++ b/ent/ansible_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -36,9 +36,25 @@ func (au *AnsibleUpdate) SetName(s string) *AnsibleUpdate { return au } -// SetHclID sets the "hcl_id" field. -func (au *AnsibleUpdate) SetHclID(s string) *AnsibleUpdate { - au.mutation.SetHclID(s) +// SetNillableName sets the "name" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableName(s *string) *AnsibleUpdate { + if s != nil { + au.SetName(*s) + } + return au +} + +// SetHCLID sets the "hcl_id" field. +func (au *AnsibleUpdate) SetHCLID(s string) *AnsibleUpdate { + au.mutation.SetHCLID(s) + return au +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableHCLID(s *string) *AnsibleUpdate { + if s != nil { + au.SetHCLID(*s) + } return au } @@ -48,36 +64,84 @@ func (au *AnsibleUpdate) SetDescription(s string) *AnsibleUpdate { return au } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableDescription(s *string) *AnsibleUpdate { + if s != nil { + au.SetDescription(*s) + } + return au +} + // SetSource sets the "source" field. func (au *AnsibleUpdate) SetSource(s string) *AnsibleUpdate { au.mutation.SetSource(s) return au } +// SetNillableSource sets the "source" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableSource(s *string) *AnsibleUpdate { + if s != nil { + au.SetSource(*s) + } + return au +} + // SetPlaybookName sets the "playbook_name" field. func (au *AnsibleUpdate) SetPlaybookName(s string) *AnsibleUpdate { au.mutation.SetPlaybookName(s) return au } +// SetNillablePlaybookName sets the "playbook_name" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillablePlaybookName(s *string) *AnsibleUpdate { + if s != nil { + au.SetPlaybookName(*s) + } + return au +} + // SetMethod sets the "method" field. func (au *AnsibleUpdate) SetMethod(a ansible.Method) *AnsibleUpdate { au.mutation.SetMethod(a) return au } +// SetNillableMethod sets the "method" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableMethod(a *ansible.Method) *AnsibleUpdate { + if a != nil { + au.SetMethod(*a) + } + return au +} + // SetInventory sets the "inventory" field. func (au *AnsibleUpdate) SetInventory(s string) *AnsibleUpdate { au.mutation.SetInventory(s) return au } +// SetNillableInventory sets the "inventory" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableInventory(s *string) *AnsibleUpdate { + if s != nil { + au.SetInventory(*s) + } + return au +} + // SetAbsPath sets the "abs_path" field. func (au *AnsibleUpdate) SetAbsPath(s string) *AnsibleUpdate { au.mutation.SetAbsPath(s) return au } +// SetNillableAbsPath sets the "abs_path" field if the given value is not nil. +func (au *AnsibleUpdate) SetNillableAbsPath(s *string) *AnsibleUpdate { + if s != nil { + au.SetAbsPath(*s) + } + return au +} + // SetTags sets the "tags" field. func (au *AnsibleUpdate) SetTags(m map[string]string) *AnsibleUpdate { au.mutation.SetTags(m) @@ -152,40 +216,7 @@ func (au *AnsibleUpdate) ClearAnsibleFromEnvironment() *AnsibleUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (au *AnsibleUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(au.hooks) == 0 { - if err = au.check(); err != nil { - return 0, err - } - affected, err = au.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AnsibleMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = au.check(); err != nil { - return 0, err - } - au.mutation = mutation - affected, err = au.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(au.hooks) - 1; i >= 0; i-- { - if au.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = au.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, au.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -221,16 +252,10 @@ func (au *AnsibleUpdate) check() error { } func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: ansible.Table, - Columns: ansible.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, - }, + if err := au.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(ansible.Table, ansible.Columns, sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID)) if ps := au.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -239,67 +264,31 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := au.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldName, - }) - } - if value, ok := au.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldHclID, - }) + _spec.SetField(ansible.FieldName, field.TypeString, value) + } + if value, ok := au.mutation.HCLID(); ok { + _spec.SetField(ansible.FieldHCLID, field.TypeString, value) } if value, ok := au.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldDescription, - }) + _spec.SetField(ansible.FieldDescription, field.TypeString, value) } if value, ok := au.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldSource, - }) + _spec.SetField(ansible.FieldSource, field.TypeString, value) } if value, ok := au.mutation.PlaybookName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldPlaybookName, - }) + _spec.SetField(ansible.FieldPlaybookName, field.TypeString, value) } if value, ok := au.mutation.Method(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: ansible.FieldMethod, - }) + _spec.SetField(ansible.FieldMethod, field.TypeEnum, value) } if value, ok := au.mutation.Inventory(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldInventory, - }) + 
_spec.SetField(ansible.FieldInventory, field.TypeString, value) } if value, ok := au.mutation.AbsPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldAbsPath, - }) + _spec.SetField(ansible.FieldAbsPath, field.TypeString, value) } if value, ok := au.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: ansible.FieldTags, - }) + _spec.SetField(ansible.FieldTags, field.TypeJSON, value) } if au.mutation.AnsibleToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -309,10 +298,7 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -325,10 +311,7 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -344,10 +327,7 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -363,10 +343,7 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{ansible.AnsibleFromEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -379,10 +356,7 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{ansible.AnsibleFromEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -394,10 +368,11 @@ func (au *AnsibleUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{ansible.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + au.mutation.done = true return n, nil } @@ -415,9 +390,25 @@ func (auo *AnsibleUpdateOne) SetName(s string) *AnsibleUpdateOne { return auo } -// SetHclID sets the "hcl_id" field. -func (auo *AnsibleUpdateOne) SetHclID(s string) *AnsibleUpdateOne { - auo.mutation.SetHclID(s) +// SetNillableName sets the "name" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableName(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetName(*s) + } + return auo +} + +// SetHCLID sets the "hcl_id" field. 
+func (auo *AnsibleUpdateOne) SetHCLID(s string) *AnsibleUpdateOne { + auo.mutation.SetHCLID(s) + return auo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableHCLID(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetHCLID(*s) + } return auo } @@ -427,36 +418,84 @@ func (auo *AnsibleUpdateOne) SetDescription(s string) *AnsibleUpdateOne { return auo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableDescription(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetDescription(*s) + } + return auo +} + // SetSource sets the "source" field. func (auo *AnsibleUpdateOne) SetSource(s string) *AnsibleUpdateOne { auo.mutation.SetSource(s) return auo } +// SetNillableSource sets the "source" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableSource(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetSource(*s) + } + return auo +} + // SetPlaybookName sets the "playbook_name" field. func (auo *AnsibleUpdateOne) SetPlaybookName(s string) *AnsibleUpdateOne { auo.mutation.SetPlaybookName(s) return auo } +// SetNillablePlaybookName sets the "playbook_name" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillablePlaybookName(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetPlaybookName(*s) + } + return auo +} + // SetMethod sets the "method" field. func (auo *AnsibleUpdateOne) SetMethod(a ansible.Method) *AnsibleUpdateOne { auo.mutation.SetMethod(a) return auo } +// SetNillableMethod sets the "method" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableMethod(a *ansible.Method) *AnsibleUpdateOne { + if a != nil { + auo.SetMethod(*a) + } + return auo +} + // SetInventory sets the "inventory" field. func (auo *AnsibleUpdateOne) SetInventory(s string) *AnsibleUpdateOne { auo.mutation.SetInventory(s) return auo } +// SetNillableInventory sets the "inventory" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableInventory(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetInventory(*s) + } + return auo +} + // SetAbsPath sets the "abs_path" field. func (auo *AnsibleUpdateOne) SetAbsPath(s string) *AnsibleUpdateOne { auo.mutation.SetAbsPath(s) return auo } +// SetNillableAbsPath sets the "abs_path" field if the given value is not nil. +func (auo *AnsibleUpdateOne) SetNillableAbsPath(s *string) *AnsibleUpdateOne { + if s != nil { + auo.SetAbsPath(*s) + } + return auo +} + // SetTags sets the "tags" field. func (auo *AnsibleUpdateOne) SetTags(m map[string]string) *AnsibleUpdateOne { auo.mutation.SetTags(m) @@ -529,6 +568,12 @@ func (auo *AnsibleUpdateOne) ClearAnsibleFromEnvironment() *AnsibleUpdateOne { return auo } +// Where appends a list predicates to the AnsibleUpdate builder. +func (auo *AnsibleUpdateOne) Where(ps ...predicate.Ansible) *AnsibleUpdateOne { + auo.mutation.Where(ps...) + return auo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (auo *AnsibleUpdateOne) Select(field string, fields ...string) *AnsibleUpdateOne { @@ -538,40 +583,7 @@ func (auo *AnsibleUpdateOne) Select(field string, fields ...string) *AnsibleUpda // Save executes the query and returns the updated Ansible entity. 
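A caller-side sketch of the renamed and newly nillable setters, assuming the same generated client as in the first sketch plus `github.com/google/uuid`; `SetHclID` call sites become `SetHCLID`, optional values go through `SetNillable*`, and `Where` can now scope an `UpdateOne`:

// Hypothetical helper for illustration; id and desc come from the caller.
func renameAnsible(ctx context.Context, client *ent.Client, id uuid.UUID, desc *string) error {
	return client.Ansible.UpdateOneID(id).
		SetHCLID("renamed-hcl-id").   // previously SetHclID
		SetNillableDescription(desc). // no-op when desc is nil
		Where(ansible.NameNEQ("")).   // predicates are now accepted on UpdateOne
		Exec(ctx)
}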
func (auo *AnsibleUpdateOne) Save(ctx context.Context) (*Ansible, error) { - var ( - err error - node *Ansible - ) - if len(auo.hooks) == 0 { - if err = auo.check(); err != nil { - return nil, err - } - node, err = auo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AnsibleMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = auo.check(); err != nil { - return nil, err - } - auo.mutation = mutation - node, err = auo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(auo.hooks) - 1; i >= 0; i-- { - if auo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = auo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, auo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -607,16 +619,10 @@ func (auo *AnsibleUpdateOne) check() error { } func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: ansible.Table, - Columns: ansible.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, - }, + if err := auo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(ansible.Table, ansible.Columns, sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID)) id, ok := auo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Ansible.id" for update`)} @@ -642,67 +648,31 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e } } if value, ok := auo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldName, - }) - } - if value, ok := auo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldHclID, - }) + _spec.SetField(ansible.FieldName, field.TypeString, value) + } + if value, ok := auo.mutation.HCLID(); ok { + _spec.SetField(ansible.FieldHCLID, field.TypeString, value) } if value, ok := auo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldDescription, - }) + _spec.SetField(ansible.FieldDescription, field.TypeString, value) } if value, ok := auo.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldSource, - }) + _spec.SetField(ansible.FieldSource, field.TypeString, value) } if value, ok := auo.mutation.PlaybookName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldPlaybookName, - }) + _spec.SetField(ansible.FieldPlaybookName, field.TypeString, value) } if value, ok := auo.mutation.Method(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: ansible.FieldMethod, - }) + _spec.SetField(ansible.FieldMethod, field.TypeEnum, value) } if value, ok := auo.mutation.Inventory(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - 
Column: ansible.FieldInventory, - }) + _spec.SetField(ansible.FieldInventory, field.TypeString, value) } if value, ok := auo.mutation.AbsPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ansible.FieldAbsPath, - }) + _spec.SetField(ansible.FieldAbsPath, field.TypeString, value) } if value, ok := auo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: ansible.FieldTags, - }) + _spec.SetField(ansible.FieldTags, field.TypeJSON, value) } if auo.mutation.AnsibleToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -712,10 +682,7 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -728,10 +695,7 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -747,10 +711,7 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e Columns: []string{ansible.AnsibleToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -766,10 +727,7 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e Columns: []string{ansible.AnsibleFromEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -782,10 +740,7 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e Columns: []string{ansible.AnsibleFromEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -800,9 +755,10 @@ func (auo *AnsibleUpdateOne) sqlSave(ctx context.Context) (_node *Ansible, err e if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{ansible.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + auo.mutation.done = true return _node, nil } diff --git a/ent/authuser.go b/ent/authuser.go index cd3b1fae..21500819 100755 --- a/ent/authuser.go +++ b/ent/authuser.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/authuser" "github.com/google/uuid" @@ -42,13 +43,14 @@ type AuthUser struct { // The values are being populated by the AuthUserQuery when eager-loading is set. 
Edges AuthUserEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // AuthUserToToken holds the value of the AuthUserToToken edge. HCLAuthUserToToken []*Token `json:"AuthUserToToken,omitempty"` // AuthUserToServerTasks holds the value of the AuthUserToServerTasks edge. HCLAuthUserToServerTasks []*ServerTask `json:"AuthUserToServerTasks,omitempty"` - // - + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ + selectValues sql.SelectValues } // AuthUserEdges holds the relations/edges for other nodes in the graph. @@ -60,6 +62,11 @@ type AuthUserEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int + + namedAuthUserToToken map[string][]*Token + namedAuthUserToServerTasks map[string][]*ServerTask } // AuthUserToTokenOrErr returns the AuthUserToToken value or an error if the edge @@ -81,8 +88,8 @@ func (e AuthUserEdges) AuthUserToServerTasksOrErr() ([]*ServerTask, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*AuthUser) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*AuthUser) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case authuser.FieldUsername, authuser.FieldPassword, authuser.FieldFirstName, authuser.FieldLastName, authuser.FieldEmail, authuser.FieldPhone, authuser.FieldCompany, authuser.FieldOccupation, authuser.FieldPrivateKeyPath, authuser.FieldRole, authuser.FieldProvider: @@ -90,7 +97,7 @@ func (*AuthUser) scanValues(columns []string) ([]interface{}, error) { case authuser.FieldID: values[i] = new(uuid.UUID) default: - return nil, fmt.Errorf("unexpected column %q for type AuthUser", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -98,7 +105,7 @@ func (*AuthUser) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the AuthUser fields. -func (au *AuthUser) assignValues(columns []string, values []interface{}) error { +func (au *AuthUser) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -176,36 +183,44 @@ func (au *AuthUser) assignValues(columns []string, values []interface{}) error { } else if value.Valid { au.Provider = authuser.Provider(value.String) } + default: + au.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the AuthUser. +// This includes values selected through modifiers, order, etc. +func (au *AuthUser) Value(name string) (ent.Value, error) { + return au.selectValues.Get(name) +} + // QueryAuthUserToToken queries the "AuthUserToToken" edge of the AuthUser entity. func (au *AuthUser) QueryAuthUserToToken() *TokenQuery { - return (&AuthUserClient{config: au.config}).QueryAuthUserToToken(au) + return NewAuthUserClient(au.config).QueryAuthUserToToken(au) } // QueryAuthUserToServerTasks queries the "AuthUserToServerTasks" edge of the AuthUser entity. 
func (au *AuthUser) QueryAuthUserToServerTasks() *ServerTaskQuery { - return (&AuthUserClient{config: au.config}).QueryAuthUserToServerTasks(au) + return NewAuthUserClient(au.config).QueryAuthUserToServerTasks(au) } // Update returns a builder for updating this AuthUser. // Note that you need to call AuthUser.Unwrap() before calling this method if this AuthUser // was returned from a transaction, and the transaction was committed or rolled back. func (au *AuthUser) Update() *AuthUserUpdateOne { - return (&AuthUserClient{config: au.config}).UpdateOne(au) + return NewAuthUserClient(au.config).UpdateOne(au) } // Unwrap unwraps the AuthUser entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (au *AuthUser) Unwrap() *AuthUser { - tx, ok := au.config.driver.(*txDriver) + _tx, ok := au.config.driver.(*txDriver) if !ok { panic("ent: AuthUser is not a transactional entity") } - au.config.driver = tx.drv + au.config.driver = _tx.drv return au } @@ -213,37 +228,89 @@ func (au *AuthUser) Unwrap() *AuthUser { func (au *AuthUser) String() string { var builder strings.Builder builder.WriteString("AuthUser(") - builder.WriteString(fmt.Sprintf("id=%v", au.ID)) - builder.WriteString(", username=") + builder.WriteString(fmt.Sprintf("id=%v, ", au.ID)) + builder.WriteString("username=") builder.WriteString(au.Username) - builder.WriteString(", password=") - builder.WriteString(", first_name=") + builder.WriteString(", ") + builder.WriteString("password=") + builder.WriteString(", ") + builder.WriteString("first_name=") builder.WriteString(au.FirstName) - builder.WriteString(", last_name=") + builder.WriteString(", ") + builder.WriteString("last_name=") builder.WriteString(au.LastName) - builder.WriteString(", email=") + builder.WriteString(", ") + builder.WriteString("email=") builder.WriteString(au.Email) - builder.WriteString(", phone=") + builder.WriteString(", ") + builder.WriteString("phone=") builder.WriteString(au.Phone) - builder.WriteString(", company=") + builder.WriteString(", ") + builder.WriteString("company=") builder.WriteString(au.Company) - builder.WriteString(", occupation=") + builder.WriteString(", ") + builder.WriteString("occupation=") builder.WriteString(au.Occupation) - builder.WriteString(", private_key_path=") + builder.WriteString(", ") + builder.WriteString("private_key_path=") builder.WriteString(au.PrivateKeyPath) - builder.WriteString(", role=") + builder.WriteString(", ") + builder.WriteString("role=") builder.WriteString(fmt.Sprintf("%v", au.Role)) - builder.WriteString(", provider=") + builder.WriteString(", ") + builder.WriteString("provider=") builder.WriteString(fmt.Sprintf("%v", au.Provider)) builder.WriteByte(')') return builder.String() } -// AuthUsers is a parsable slice of AuthUser. -type AuthUsers []*AuthUser +// NamedAuthUserToToken returns the AuthUserToToken named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (au *AuthUser) NamedAuthUserToToken(name string) ([]*Token, error) { + if au.Edges.namedAuthUserToToken == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := au.Edges.namedAuthUserToToken[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (au *AuthUser) appendNamedAuthUserToToken(name string, edges ...*Token) { + if au.Edges.namedAuthUserToToken == nil { + au.Edges.namedAuthUserToToken = make(map[string][]*Token) + } + if len(edges) == 0 { + au.Edges.namedAuthUserToToken[name] = []*Token{} + } else { + au.Edges.namedAuthUserToToken[name] = append(au.Edges.namedAuthUserToToken[name], edges...) + } +} + +// NamedAuthUserToServerTasks returns the AuthUserToServerTasks named value or an error if the edge was not +// loaded in eager-loading with this name. +func (au *AuthUser) NamedAuthUserToServerTasks(name string) ([]*ServerTask, error) { + if au.Edges.namedAuthUserToServerTasks == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := au.Edges.namedAuthUserToServerTasks[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (au AuthUsers) config(cfg config) { - for _i := range au { - au[_i].config = cfg +func (au *AuthUser) appendNamedAuthUserToServerTasks(name string, edges ...*ServerTask) { + if au.Edges.namedAuthUserToServerTasks == nil { + au.Edges.namedAuthUserToServerTasks = make(map[string][]*ServerTask) + } + if len(edges) == 0 { + au.Edges.namedAuthUserToServerTasks[name] = []*ServerTask{} + } else { + au.Edges.namedAuthUserToServerTasks[name] = append(au.Edges.namedAuthUserToServerTasks[name], edges...) } } + +// AuthUsers is a parsable slice of AuthUser. +type AuthUsers []*AuthUser diff --git a/ent/authuser/authuser.go b/ent/authuser/authuser.go index e16c98db..a929a241 100755 --- a/ent/authuser/authuser.go +++ b/ent/authuser/authuser.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package authuser @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -151,37 +153,142 @@ func ProviderValidator(pr Provider) error { } } +// OrderOption defines the ordering options for the AuthUser queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByPassword orders the results by the password field. +func ByPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPassword, opts...).ToFunc() +} + +// ByFirstName orders the results by the first_name field. +func ByFirstName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFirstName, opts...).ToFunc() +} + +// ByLastName orders the results by the last_name field. +func ByLastName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastName, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByPhone orders the results by the phone field. 
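A sketch of the named-edge accessors added above, assuming the matching `WithNamedAuthUserToToken` loader that ent's `namedges` feature generates on `AuthUserQuery` (not shown in this hunk); imports as in the first sketch, plus `github.com/gen0cide/laforge/ent/authuser` and `github.com/google/uuid`:

func activeTokens(ctx context.Context, client *ent.Client, id uuid.UUID) ([]*ent.Token, error) {
	au, err := client.AuthUser.Query().
		Where(authuser.IDEQ(id)).
		WithNamedAuthUserToToken("active", func(q *ent.TokenQuery) {
			q.Limit(10) // shape the named eager-load like any other edge loader
		}).
		Only(ctx)
	if err != nil {
		return nil, err
	}
	// Read the edge back under the same name via the accessor added above.
	return au.NamedAuthUserToToken("active")
}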
+func ByPhone(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPhone, opts...).ToFunc() +} + +// ByCompany orders the results by the company field. +func ByCompany(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCompany, opts...).ToFunc() +} + +// ByOccupation orders the results by the occupation field. +func ByOccupation(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOccupation, opts...).ToFunc() +} + +// ByPrivateKeyPath orders the results by the private_key_path field. +func ByPrivateKeyPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPrivateKeyPath, opts...).ToFunc() +} + +// ByRole orders the results by the role field. +func ByRole(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRole, opts...).ToFunc() +} + +// ByProvider orders the results by the provider field. +func ByProvider(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProvider, opts...).ToFunc() +} + +// ByAuthUserToTokenCount orders the results by AuthUserToToken count. +func ByAuthUserToTokenCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAuthUserToTokenStep(), opts...) + } +} + +// ByAuthUserToToken orders the results by AuthUserToToken terms. +func ByAuthUserToToken(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAuthUserToTokenStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAuthUserToServerTasksCount orders the results by AuthUserToServerTasks count. +func ByAuthUserToServerTasksCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAuthUserToServerTasksStep(), opts...) + } +} + +// ByAuthUserToServerTasks orders the results by AuthUserToServerTasks terms. +func ByAuthUserToServerTasks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAuthUserToServerTasksStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAuthUserToTokenStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AuthUserToTokenInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AuthUserToTokenTable, AuthUserToTokenColumn), + ) +} +func newAuthUserToServerTasksStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AuthUserToServerTasksInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AuthUserToServerTasksTable, AuthUserToServerTasksColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (r Role) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(r.String())) +func (e Role) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (r *Role) UnmarshalGQL(val interface{}) error { +func (e *Role) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *r = Role(str) - if err := RoleValidator(*r); err != nil { + *e = Role(str) + if err := RoleValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Role", str) } return nil } // MarshalGQL implements graphql.Marshaler interface. 
-func (pr Provider) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(pr.String())) +func (e Provider) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (pr *Provider) UnmarshalGQL(val interface{}) error { +func (e *Provider) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *pr = Provider(str) - if err := ProviderValidator(*pr); err != nil { + *e = Provider(str) + if err := ProviderValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Provider", str) } return nil diff --git a/ent/authuser/where.go b/ent/authuser/where.go index 7847e6fd..92835f84 100755 --- a/ent/authuser/where.go +++ b/ent/authuser/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package authuser @@ -11,1243 +11,717 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. 
func IDLTE(id uuid.UUID) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldID, id)) } // Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. func Username(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldUsername, v)) } // Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. func Password(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldPassword, v)) } // FirstName applies equality check predicate on the "first_name" field. It's identical to FirstNameEQ. func FirstName(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldFirstName, v)) } // LastName applies equality check predicate on the "last_name" field. It's identical to LastNameEQ. func LastName(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldLastName, v)) } // Email applies equality check predicate on the "email" field. It's identical to EmailEQ. func Email(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldEmail, v)) } // Phone applies equality check predicate on the "phone" field. It's identical to PhoneEQ. func Phone(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldPhone, v)) } // Company applies equality check predicate on the "company" field. It's identical to CompanyEQ. func Company(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldCompany, v)) } // Occupation applies equality check predicate on the "occupation" field. It's identical to OccupationEQ. func Occupation(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldOccupation, v)) } // PrivateKeyPath applies equality check predicate on the "private_key_path" field. It's identical to PrivateKeyPathEQ. func PrivateKeyPath(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldPrivateKeyPath, v)) } // UsernameEQ applies the EQ predicate on the "username" field. func UsernameEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldUsername, v)) } // UsernameNEQ applies the NEQ predicate on the "username" field. 
func UsernameNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldUsername, v)) } // UsernameIn applies the In predicate on the "username" field. func UsernameIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldUsername), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldUsername, vs...)) } // UsernameNotIn applies the NotIn predicate on the "username" field. func UsernameNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldUsername), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldUsername, vs...)) } // UsernameGT applies the GT predicate on the "username" field. func UsernameGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldUsername, v)) } // UsernameGTE applies the GTE predicate on the "username" field. func UsernameGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldUsername, v)) } // UsernameLT applies the LT predicate on the "username" field. func UsernameLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldUsername, v)) } // UsernameLTE applies the LTE predicate on the "username" field. func UsernameLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldUsername, v)) } // UsernameContains applies the Contains predicate on the "username" field. func UsernameContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldUsername, v)) } // UsernameHasPrefix applies the HasPrefix predicate on the "username" field. func UsernameHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldUsername, v)) } // UsernameHasSuffix applies the HasSuffix predicate on the "username" field. func UsernameHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldUsername, v)) } // UsernameEqualFold applies the EqualFold predicate on the "username" field. 
func UsernameEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldUsername, v)) } // UsernameContainsFold applies the ContainsFold predicate on the "username" field. func UsernameContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldUsername), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldUsername, v)) } // PasswordEQ applies the EQ predicate on the "password" field. func PasswordEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldPassword, v)) } // PasswordNEQ applies the NEQ predicate on the "password" field. func PasswordNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldPassword, v)) } // PasswordIn applies the In predicate on the "password" field. func PasswordIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPassword), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldPassword, vs...)) } // PasswordNotIn applies the NotIn predicate on the "password" field. func PasswordNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPassword), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldPassword, vs...)) } // PasswordGT applies the GT predicate on the "password" field. func PasswordGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldPassword, v)) } // PasswordGTE applies the GTE predicate on the "password" field. func PasswordGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldPassword, v)) } // PasswordLT applies the LT predicate on the "password" field. func PasswordLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldPassword, v)) } // PasswordLTE applies the LTE predicate on the "password" field. func PasswordLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldPassword, v)) } // PasswordContains applies the Contains predicate on the "password" field. 
func PasswordContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldPassword, v)) } // PasswordHasPrefix applies the HasPrefix predicate on the "password" field. func PasswordHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldPassword, v)) } // PasswordHasSuffix applies the HasSuffix predicate on the "password" field. func PasswordHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldPassword, v)) } // PasswordEqualFold applies the EqualFold predicate on the "password" field. func PasswordEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldPassword, v)) } // PasswordContainsFold applies the ContainsFold predicate on the "password" field. func PasswordContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPassword), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldPassword, v)) } // FirstNameEQ applies the EQ predicate on the "first_name" field. func FirstNameEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldFirstName, v)) } // FirstNameNEQ applies the NEQ predicate on the "first_name" field. func FirstNameNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldFirstName, v)) } // FirstNameIn applies the In predicate on the "first_name" field. func FirstNameIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldFirstName), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldFirstName, vs...)) } // FirstNameNotIn applies the NotIn predicate on the "first_name" field. func FirstNameNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldFirstName), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldFirstName, vs...)) } // FirstNameGT applies the GT predicate on the "first_name" field. func FirstNameGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldFirstName, v)) } // FirstNameGTE applies the GTE predicate on the "first_name" field. 
func FirstNameGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldFirstName, v)) } // FirstNameLT applies the LT predicate on the "first_name" field. func FirstNameLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldFirstName, v)) } // FirstNameLTE applies the LTE predicate on the "first_name" field. func FirstNameLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldFirstName, v)) } // FirstNameContains applies the Contains predicate on the "first_name" field. func FirstNameContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldFirstName, v)) } // FirstNameHasPrefix applies the HasPrefix predicate on the "first_name" field. func FirstNameHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldFirstName, v)) } // FirstNameHasSuffix applies the HasSuffix predicate on the "first_name" field. func FirstNameHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldFirstName, v)) } // FirstNameEqualFold applies the EqualFold predicate on the "first_name" field. func FirstNameEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldFirstName, v)) } // FirstNameContainsFold applies the ContainsFold predicate on the "first_name" field. func FirstNameContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldFirstName), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldFirstName, v)) } // LastNameEQ applies the EQ predicate on the "last_name" field. func LastNameEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldLastName, v)) } // LastNameNEQ applies the NEQ predicate on the "last_name" field. func LastNameNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldLastName, v)) } // LastNameIn applies the In predicate on the "last_name" field. func LastNameIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLastName), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldLastName, vs...)) } // LastNameNotIn applies the NotIn predicate on the "last_name" field. 
func LastNameNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLastName), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldLastName, vs...)) } // LastNameGT applies the GT predicate on the "last_name" field. func LastNameGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldLastName, v)) } // LastNameGTE applies the GTE predicate on the "last_name" field. func LastNameGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldLastName, v)) } // LastNameLT applies the LT predicate on the "last_name" field. func LastNameLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldLastName, v)) } // LastNameLTE applies the LTE predicate on the "last_name" field. func LastNameLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldLastName, v)) } // LastNameContains applies the Contains predicate on the "last_name" field. func LastNameContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldLastName, v)) } // LastNameHasPrefix applies the HasPrefix predicate on the "last_name" field. func LastNameHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldLastName, v)) } // LastNameHasSuffix applies the HasSuffix predicate on the "last_name" field. func LastNameHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldLastName, v)) } // LastNameEqualFold applies the EqualFold predicate on the "last_name" field. func LastNameEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldLastName, v)) } // LastNameContainsFold applies the ContainsFold predicate on the "last_name" field. func LastNameContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldLastName), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldLastName, v)) } // EmailEQ applies the EQ predicate on the "email" field. func EmailEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldEmail, v)) } // EmailNEQ applies the NEQ predicate on the "email" field. 
func EmailNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldEmail, v)) } // EmailIn applies the In predicate on the "email" field. func EmailIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEmail), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldEmail, vs...)) } // EmailNotIn applies the NotIn predicate on the "email" field. func EmailNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEmail), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldEmail, vs...)) } // EmailGT applies the GT predicate on the "email" field. func EmailGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldEmail, v)) } // EmailGTE applies the GTE predicate on the "email" field. func EmailGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldEmail, v)) } // EmailLT applies the LT predicate on the "email" field. func EmailLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldEmail, v)) } // EmailLTE applies the LTE predicate on the "email" field. func EmailLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldEmail, v)) } // EmailContains applies the Contains predicate on the "email" field. func EmailContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldEmail, v)) } // EmailHasPrefix applies the HasPrefix predicate on the "email" field. func EmailHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldEmail, v)) } // EmailHasSuffix applies the HasSuffix predicate on the "email" field. func EmailHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldEmail, v)) } // EmailEqualFold applies the EqualFold predicate on the "email" field. func EmailEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldEmail, v)) } // EmailContainsFold applies the ContainsFold predicate on the "email" field. 
func EmailContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldEmail), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldEmail, v)) } // PhoneEQ applies the EQ predicate on the "phone" field. func PhoneEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldPhone, v)) } // PhoneNEQ applies the NEQ predicate on the "phone" field. func PhoneNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldPhone, v)) } // PhoneIn applies the In predicate on the "phone" field. func PhoneIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPhone), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldPhone, vs...)) } // PhoneNotIn applies the NotIn predicate on the "phone" field. func PhoneNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPhone), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldPhone, vs...)) } // PhoneGT applies the GT predicate on the "phone" field. func PhoneGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldPhone, v)) } // PhoneGTE applies the GTE predicate on the "phone" field. func PhoneGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldPhone, v)) } // PhoneLT applies the LT predicate on the "phone" field. func PhoneLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldPhone, v)) } // PhoneLTE applies the LTE predicate on the "phone" field. func PhoneLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldPhone, v)) } // PhoneContains applies the Contains predicate on the "phone" field. func PhoneContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldPhone, v)) } // PhoneHasPrefix applies the HasPrefix predicate on the "phone" field. func PhoneHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldPhone, v)) } // PhoneHasSuffix applies the HasSuffix predicate on the "phone" field. 
func PhoneHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldPhone, v)) } // PhoneEqualFold applies the EqualFold predicate on the "phone" field. func PhoneEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldPhone, v)) } // PhoneContainsFold applies the ContainsFold predicate on the "phone" field. func PhoneContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPhone), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldPhone, v)) } // CompanyEQ applies the EQ predicate on the "company" field. func CompanyEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldCompany, v)) } // CompanyNEQ applies the NEQ predicate on the "company" field. func CompanyNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldCompany, v)) } // CompanyIn applies the In predicate on the "company" field. func CompanyIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCompany), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldCompany, vs...)) } // CompanyNotIn applies the NotIn predicate on the "company" field. func CompanyNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCompany), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldCompany, vs...)) } // CompanyGT applies the GT predicate on the "company" field. func CompanyGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldCompany, v)) } // CompanyGTE applies the GTE predicate on the "company" field. func CompanyGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldCompany, v)) } // CompanyLT applies the LT predicate on the "company" field. func CompanyLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldCompany, v)) } // CompanyLTE applies the LTE predicate on the "company" field. 
func CompanyLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldCompany, v)) } // CompanyContains applies the Contains predicate on the "company" field. func CompanyContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldCompany, v)) } // CompanyHasPrefix applies the HasPrefix predicate on the "company" field. func CompanyHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldCompany, v)) } // CompanyHasSuffix applies the HasSuffix predicate on the "company" field. func CompanyHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldCompany, v)) } // CompanyEqualFold applies the EqualFold predicate on the "company" field. func CompanyEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldCompany, v)) } // CompanyContainsFold applies the ContainsFold predicate on the "company" field. func CompanyContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldCompany), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldCompany, v)) } // OccupationEQ applies the EQ predicate on the "occupation" field. func OccupationEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldOccupation, v)) } // OccupationNEQ applies the NEQ predicate on the "occupation" field. func OccupationNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldOccupation, v)) } // OccupationIn applies the In predicate on the "occupation" field. func OccupationIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldOccupation), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldOccupation, vs...)) } // OccupationNotIn applies the NotIn predicate on the "occupation" field. func OccupationNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldOccupation), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldOccupation, vs...)) } // OccupationGT applies the GT predicate on the "occupation" field. 
func OccupationGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldOccupation, v)) } // OccupationGTE applies the GTE predicate on the "occupation" field. func OccupationGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldOccupation, v)) } // OccupationLT applies the LT predicate on the "occupation" field. func OccupationLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldOccupation, v)) } // OccupationLTE applies the LTE predicate on the "occupation" field. func OccupationLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldOccupation, v)) } // OccupationContains applies the Contains predicate on the "occupation" field. func OccupationContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldOccupation, v)) } // OccupationHasPrefix applies the HasPrefix predicate on the "occupation" field. func OccupationHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldOccupation, v)) } // OccupationHasSuffix applies the HasSuffix predicate on the "occupation" field. func OccupationHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldOccupation, v)) } // OccupationEqualFold applies the EqualFold predicate on the "occupation" field. func OccupationEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldOccupation, v)) } // OccupationContainsFold applies the ContainsFold predicate on the "occupation" field. func OccupationContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOccupation), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldOccupation, v)) } // PrivateKeyPathEQ applies the EQ predicate on the "private_key_path" field. func PrivateKeyPathEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldPrivateKeyPath, v)) } // PrivateKeyPathNEQ applies the NEQ predicate on the "private_key_path" field. func PrivateKeyPathNEQ(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldPrivateKeyPath, v)) } // PrivateKeyPathIn applies the In predicate on the "private_key_path" field. 
func PrivateKeyPathIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPrivateKeyPath), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldPrivateKeyPath, vs...)) } // PrivateKeyPathNotIn applies the NotIn predicate on the "private_key_path" field. func PrivateKeyPathNotIn(vs ...string) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPrivateKeyPath), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldPrivateKeyPath, vs...)) } // PrivateKeyPathGT applies the GT predicate on the "private_key_path" field. func PrivateKeyPathGT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldGT(FieldPrivateKeyPath, v)) } // PrivateKeyPathGTE applies the GTE predicate on the "private_key_path" field. func PrivateKeyPathGTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldGTE(FieldPrivateKeyPath, v)) } // PrivateKeyPathLT applies the LT predicate on the "private_key_path" field. func PrivateKeyPathLT(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldLT(FieldPrivateKeyPath, v)) } // PrivateKeyPathLTE applies the LTE predicate on the "private_key_path" field. func PrivateKeyPathLTE(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldLTE(FieldPrivateKeyPath, v)) } // PrivateKeyPathContains applies the Contains predicate on the "private_key_path" field. func PrivateKeyPathContains(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldContains(FieldPrivateKeyPath, v)) } // PrivateKeyPathHasPrefix applies the HasPrefix predicate on the "private_key_path" field. func PrivateKeyPathHasPrefix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldHasPrefix(FieldPrivateKeyPath, v)) } // PrivateKeyPathHasSuffix applies the HasSuffix predicate on the "private_key_path" field. func PrivateKeyPathHasSuffix(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldHasSuffix(FieldPrivateKeyPath, v)) } // PrivateKeyPathEqualFold applies the EqualFold predicate on the "private_key_path" field. 
func PrivateKeyPathEqualFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldEqualFold(FieldPrivateKeyPath, v)) } // PrivateKeyPathContainsFold applies the ContainsFold predicate on the "private_key_path" field. func PrivateKeyPathContainsFold(v string) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPrivateKeyPath), v)) - }) + return predicate.AuthUser(sql.FieldContainsFold(FieldPrivateKeyPath, v)) } // RoleEQ applies the EQ predicate on the "role" field. func RoleEQ(v Role) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRole), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldRole, v)) } // RoleNEQ applies the NEQ predicate on the "role" field. func RoleNEQ(v Role) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRole), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldRole, v)) } // RoleIn applies the In predicate on the "role" field. func RoleIn(vs ...Role) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRole), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldRole, vs...)) } // RoleNotIn applies the NotIn predicate on the "role" field. func RoleNotIn(vs ...Role) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRole), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldRole, vs...)) } // ProviderEQ applies the EQ predicate on the "provider" field. func ProviderEQ(v Provider) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldProvider), v)) - }) + return predicate.AuthUser(sql.FieldEQ(FieldProvider, v)) } // ProviderNEQ applies the NEQ predicate on the "provider" field. func ProviderNEQ(v Provider) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldProvider), v)) - }) + return predicate.AuthUser(sql.FieldNEQ(FieldProvider, v)) } // ProviderIn applies the In predicate on the "provider" field. func ProviderIn(vs ...Provider) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldProvider), v...)) - }) + return predicate.AuthUser(sql.FieldIn(FieldProvider, vs...)) } // ProviderNotIn applies the NotIn predicate on the "provider" field. 
func ProviderNotIn(vs ...Provider) predicate.AuthUser { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.AuthUser(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldProvider), v...)) - }) + return predicate.AuthUser(sql.FieldNotIn(FieldProvider, vs...)) } // HasAuthUserToToken applies the HasEdge predicate on the "AuthUserToToken" edge. @@ -1255,7 +729,6 @@ func HasAuthUserToToken() predicate.AuthUser { return predicate.AuthUser(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AuthUserToTokenTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, AuthUserToTokenTable, AuthUserToTokenColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1265,11 +738,7 @@ func HasAuthUserToToken() predicate.AuthUser { // HasAuthUserToTokenWith applies the HasEdge predicate on the "AuthUserToToken" edge with a given conditions (other predicates). func HasAuthUserToTokenWith(preds ...predicate.Token) predicate.AuthUser { return predicate.AuthUser(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AuthUserToTokenInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AuthUserToTokenTable, AuthUserToTokenColumn), - ) + step := newAuthUserToTokenStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1283,7 +752,6 @@ func HasAuthUserToServerTasks() predicate.AuthUser { return predicate.AuthUser(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AuthUserToServerTasksTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, AuthUserToServerTasksTable, AuthUserToServerTasksColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1293,11 +761,7 @@ func HasAuthUserToServerTasks() predicate.AuthUser { // HasAuthUserToServerTasksWith applies the HasEdge predicate on the "AuthUserToServerTasks" edge with a given conditions (other predicates). func HasAuthUserToServerTasksWith(preds ...predicate.ServerTask) predicate.AuthUser { return predicate.AuthUser(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AuthUserToServerTasksInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, AuthUserToServerTasksTable, AuthUserToServerTasksColumn), - ) + step := newAuthUserToServerTasksStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1308,32 +772,15 @@ func HasAuthUserToServerTasksWith(preds ...predicate.ServerTask) predicate.AuthU // And groups predicates with the AND operator between them. func And(predicates ...predicate.AuthUser) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AuthUser(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.AuthUser) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AuthUser(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
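// --- Editor's illustrative sketch (not part of the generated diff) ---
// The predicates above now delegate to the sql.Field* helpers (and And/Or/Not
// to sql.AndPredicates/OrPredicates/NotPredicates) instead of hand-rolled
// selector closures; their exported signatures are unchanged, so existing call
// sites keep compiling. A minimal composition example, assuming the
// repository's module path and an already-opened *ent.Client:
package predicatesketch

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/authuser"
)

// ExternalUsers combines field predicates with And/Not exactly as before;
// only the predicate internals changed in this diff. Filter values are
// hypothetical.
func ExternalUsers(ctx context.Context, client *ent.Client) ([]*ent.AuthUser, error) {
	return client.AuthUser.Query().
		Where(
			authuser.And(
				authuser.EmailHasSuffix("@example.com"),
				authuser.Not(authuser.CompanyEqualFold("internal")),
			),
		).
		All(ctx)
}
// --- end sketch ---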
func Not(p predicate.AuthUser) predicate.AuthUser { - return predicate.AuthUser(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.AuthUser(sql.NotPredicates(p)) } diff --git a/ent/authuser_create.go b/ent/authuser_create.go index da4c7fab..699908ca 100755 --- a/ent/authuser_create.go +++ b/ent/authuser_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -195,44 +195,8 @@ func (auc *AuthUserCreate) Mutation() *AuthUserMutation { // Save creates the AuthUser in the database. func (auc *AuthUserCreate) Save(ctx context.Context) (*AuthUser, error) { - var ( - err error - node *AuthUser - ) auc.defaults() - if len(auc.hooks) == 0 { - if err = auc.check(); err != nil { - return nil, err - } - node, err = auc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AuthUserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = auc.check(); err != nil { - return nil, err - } - auc.mutation = mutation - if node, err = auc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(auc.hooks) - 1; i >= 0; i-- { - if auc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = auc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, auc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, auc.sqlSave, auc.mutation, auc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -342,10 +306,13 @@ func (auc *AuthUserCreate) check() error { } func (auc *AuthUserCreate) sqlSave(ctx context.Context) (*AuthUser, error) { + if err := auc.check(); err != nil { + return nil, err + } _node, _spec := auc.createSpec() if err := sqlgraph.CreateNode(ctx, auc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -356,110 +323,62 @@ func (auc *AuthUserCreate) sqlSave(ctx context.Context) (*AuthUser, error) { return nil, err } } + auc.mutation.id = &_node.ID + auc.mutation.done = true return _node, nil } func (auc *AuthUserCreate) createSpec() (*AuthUser, *sqlgraph.CreateSpec) { var ( _node = &AuthUser{config: auc.config} - _spec = &sqlgraph.CreateSpec{ - Table: authuser.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(authuser.Table, sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID)) ) if id, ok := auc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := auc.mutation.Username(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldUsername, - }) + _spec.SetField(authuser.FieldUsername, field.TypeString, value) _node.Username = value } if value, ok := auc.mutation.Password(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPassword, - }) + _spec.SetField(authuser.FieldPassword, field.TypeString, value) _node.Password = value } if value, ok := auc.mutation.FirstName(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldFirstName, - }) + _spec.SetField(authuser.FieldFirstName, 
field.TypeString, value) _node.FirstName = value } if value, ok := auc.mutation.LastName(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldLastName, - }) + _spec.SetField(authuser.FieldLastName, field.TypeString, value) _node.LastName = value } if value, ok := auc.mutation.Email(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldEmail, - }) + _spec.SetField(authuser.FieldEmail, field.TypeString, value) _node.Email = value } if value, ok := auc.mutation.Phone(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPhone, - }) + _spec.SetField(authuser.FieldPhone, field.TypeString, value) _node.Phone = value } if value, ok := auc.mutation.Company(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldCompany, - }) + _spec.SetField(authuser.FieldCompany, field.TypeString, value) _node.Company = value } if value, ok := auc.mutation.Occupation(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldOccupation, - }) + _spec.SetField(authuser.FieldOccupation, field.TypeString, value) _node.Occupation = value } if value, ok := auc.mutation.PrivateKeyPath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPrivateKeyPath, - }) + _spec.SetField(authuser.FieldPrivateKeyPath, field.TypeString, value) _node.PrivateKeyPath = value } if value, ok := auc.mutation.Role(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: authuser.FieldRole, - }) + _spec.SetField(authuser.FieldRole, field.TypeEnum, value) _node.Role = value } if value, ok := auc.mutation.Provider(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: authuser.FieldProvider, - }) + _spec.SetField(authuser.FieldProvider, field.TypeEnum, value) _node.Provider = value } if nodes := auc.mutation.AuthUserToTokenIDs(); len(nodes) > 0 { @@ -470,10 +389,7 @@ func (auc *AuthUserCreate) createSpec() (*AuthUser, *sqlgraph.CreateSpec) { Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -489,10 +405,7 @@ func (auc *AuthUserCreate) createSpec() (*AuthUser, *sqlgraph.CreateSpec) { Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -506,11 +419,15 @@ func (auc *AuthUserCreate) createSpec() (*AuthUser, *sqlgraph.CreateSpec) { // AuthUserCreateBulk is the builder for creating many AuthUser entities in bulk. type AuthUserCreateBulk struct { config + err error builders []*AuthUserCreate } // Save creates the AuthUser entities in the database. 
func (aucb *AuthUserCreateBulk) Save(ctx context.Context) ([]*AuthUser, error) { + if aucb.err != nil { + return nil, aucb.err + } specs := make([]*sqlgraph.CreateSpec, len(aucb.builders)) nodes := make([]*AuthUser, len(aucb.builders)) mutators := make([]Mutator, len(aucb.builders)) @@ -527,8 +444,8 @@ func (aucb *AuthUserCreateBulk) Save(ctx context.Context) ([]*AuthUser, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, aucb.builders[i+1].mutation) } else { @@ -536,7 +453,7 @@ func (aucb *AuthUserCreateBulk) Save(ctx context.Context) ([]*AuthUser, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, aucb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/authuser_delete.go b/ent/authuser_delete.go index a536ac95..5cb42c20 100755 --- a/ent/authuser_delete.go +++ b/ent/authuser_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (aud *AuthUserDelete) Where(ps ...predicate.AuthUser) *AuthUserDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (aud *AuthUserDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(aud.hooks) == 0 { - affected, err = aud.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AuthUserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - aud.mutation = mutation - affected, err = aud.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(aud.hooks) - 1; i >= 0; i-- { - if aud.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = aud.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, aud.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, aud.sqlExec, aud.mutation, aud.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (aud *AuthUserDelete) ExecX(ctx context.Context) int { } func (aud *AuthUserDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: authuser.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(authuser.Table, sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID)) if ps := aud.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (aud *AuthUserDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, aud.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, aud.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + aud.mutation.done = true + return affected, err } // AuthUserDeleteOne is the builder for deleting a single AuthUser entity. 
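// --- Editor's illustrative sketch (not part of the generated diff) ---
// Save/Exec on the create and delete builders above now run through the shared
// withHooks helper and wrap driver errors in a *ConstraintError built with
// named fields, but the calling convention is unchanged. A sketch of a create
// with constraint handling, assuming an *ent.Client; which fields the schema
// actually requires is not shown in this diff, so only a subset is set here
// for illustration.
package createsketch

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
)

func CreateOperator(ctx context.Context, client *ent.Client) (*ent.AuthUser, error) {
	u, err := client.AuthUser.Create().
		SetUsername("operator01"). // hypothetical values
		SetPassword("change-me").
		SetEmail("operator01@example.com").
		Save(ctx)
	if ent.IsConstraintError(err) {
		// Unique or foreign-key violations surface as *ConstraintError.
		return nil, fmt.Errorf("auth user already exists: %w", err)
	}
	return u, err
}
// --- end sketch ---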
@@ -92,6 +61,12 @@ type AuthUserDeleteOne struct { aud *AuthUserDelete } +// Where appends a list predicates to the AuthUserDelete builder. +func (audo *AuthUserDeleteOne) Where(ps ...predicate.AuthUser) *AuthUserDeleteOne { + audo.aud.mutation.Where(ps...) + return audo +} + // Exec executes the deletion query. func (audo *AuthUserDeleteOne) Exec(ctx context.Context) error { n, err := audo.aud.Exec(ctx) @@ -107,5 +82,7 @@ func (audo *AuthUserDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (audo *AuthUserDeleteOne) ExecX(ctx context.Context) { - audo.aud.ExecX(ctx) + if err := audo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/authuser_query.go b/ent/authuser_query.go index 43e722e6..2ebd2a54 100755 --- a/ent/authuser_query.go +++ b/ent/authuser_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,15 +21,16 @@ import ( // AuthUserQuery is the builder for querying AuthUser entities. type AuthUserQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.AuthUser - // eager-loading edges. - withAuthUserToToken *TokenQuery - withAuthUserToServerTasks *ServerTaskQuery + ctx *QueryContext + order []authuser.OrderOption + inters []Interceptor + predicates []predicate.AuthUser + withAuthUserToToken *TokenQuery + withAuthUserToServerTasks *ServerTaskQuery + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*AuthUser) error + withNamedAuthUserToToken map[string]*TokenQuery + withNamedAuthUserToServerTasks map[string]*ServerTaskQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -42,34 +42,34 @@ func (auq *AuthUserQuery) Where(ps ...predicate.AuthUser) *AuthUserQuery { return auq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (auq *AuthUserQuery) Limit(limit int) *AuthUserQuery { - auq.limit = &limit + auq.ctx.Limit = &limit return auq } -// Offset adds an offset step to the query. +// Offset to start from. func (auq *AuthUserQuery) Offset(offset int) *AuthUserQuery { - auq.offset = &offset + auq.ctx.Offset = &offset return auq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (auq *AuthUserQuery) Unique(unique bool) *AuthUserQuery { - auq.unique = &unique + auq.ctx.Unique = &unique return auq } -// Order adds an order step to the query. -func (auq *AuthUserQuery) Order(o ...OrderFunc) *AuthUserQuery { +// Order specifies how the records should be ordered. +func (auq *AuthUserQuery) Order(o ...authuser.OrderOption) *AuthUserQuery { auq.order = append(auq.order, o...) return auq } // QueryAuthUserToToken chains the current query on the "AuthUserToToken" edge. func (auq *AuthUserQuery) QueryAuthUserToToken() *TokenQuery { - query := &TokenQuery{config: auq.config} + query := (&TokenClient{config: auq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := auq.prepareQuery(ctx); err != nil { return nil, err @@ -91,7 +91,7 @@ func (auq *AuthUserQuery) QueryAuthUserToToken() *TokenQuery { // QueryAuthUserToServerTasks chains the current query on the "AuthUserToServerTasks" edge. 
func (auq *AuthUserQuery) QueryAuthUserToServerTasks() *ServerTaskQuery { - query := &ServerTaskQuery{config: auq.config} + query := (&ServerTaskClient{config: auq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := auq.prepareQuery(ctx); err != nil { return nil, err @@ -114,7 +114,7 @@ func (auq *AuthUserQuery) QueryAuthUserToServerTasks() *ServerTaskQuery { // First returns the first AuthUser entity from the query. // Returns a *NotFoundError when no AuthUser was found. func (auq *AuthUserQuery) First(ctx context.Context) (*AuthUser, error) { - nodes, err := auq.Limit(1).All(ctx) + nodes, err := auq.Limit(1).All(setContextOp(ctx, auq.ctx, "First")) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func (auq *AuthUserQuery) FirstX(ctx context.Context) *AuthUser { // Returns a *NotFoundError when no AuthUser ID was found. func (auq *AuthUserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = auq.Limit(1).IDs(ctx); err != nil { + if ids, err = auq.Limit(1).IDs(setContextOp(ctx, auq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -160,7 +160,7 @@ func (auq *AuthUserQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one AuthUser entity is found. // Returns a *NotFoundError when no AuthUser entities are found. func (auq *AuthUserQuery) Only(ctx context.Context) (*AuthUser, error) { - nodes, err := auq.Limit(2).All(ctx) + nodes, err := auq.Limit(2).All(setContextOp(ctx, auq.ctx, "Only")) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func (auq *AuthUserQuery) OnlyX(ctx context.Context) *AuthUser { // Returns a *NotFoundError when no entities are found. func (auq *AuthUserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = auq.Limit(2).IDs(ctx); err != nil { + if ids, err = auq.Limit(2).IDs(setContextOp(ctx, auq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -213,10 +213,12 @@ func (auq *AuthUserQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of AuthUsers. func (auq *AuthUserQuery) All(ctx context.Context) ([]*AuthUser, error) { + ctx = setContextOp(ctx, auq.ctx, "All") if err := auq.prepareQuery(ctx); err != nil { return nil, err } - return auq.sqlAll(ctx) + qr := querierAll[[]*AuthUser, *AuthUserQuery]() + return withInterceptors[[]*AuthUser](ctx, auq, qr, auq.inters) } // AllX is like All, but panics if an error occurs. @@ -229,9 +231,12 @@ func (auq *AuthUserQuery) AllX(ctx context.Context) []*AuthUser { } // IDs executes the query and returns a list of AuthUser IDs. -func (auq *AuthUserQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := auq.Select(authuser.FieldID).Scan(ctx, &ids); err != nil { +func (auq *AuthUserQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if auq.ctx.Unique == nil && auq.path != nil { + auq.Unique(true) + } + ctx = setContextOp(ctx, auq.ctx, "IDs") + if err = auq.Select(authuser.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -248,10 +253,11 @@ func (auq *AuthUserQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. 
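// --- Editor's illustrative sketch (not part of the generated diff) ---
// First/FirstID/Only/All/IDs above are now routed through the shared
// QueryContext plumbing (setContextOp, interceptors), and Exist is rebuilt on
// FirstID, but their results are unchanged. A typical call site, assuming an
// *ent.Client and a hypothetical username value:
package querysketch

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/authuser"
)

func LookupByUsername(ctx context.Context, client *ent.Client, name string) (*ent.AuthUser, bool, error) {
	u, err := client.AuthUser.Query().
		Where(authuser.UsernameEQ(name)).
		Only(ctx)
	if ent.IsNotFound(err) {
		// Exist now follows the same path internally (FirstID + IsNotFound).
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return u, true, nil
}
// --- end sketch ---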
func (auq *AuthUserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, auq.ctx, "Count") if err := auq.prepareQuery(ctx); err != nil { return 0, err } - return auq.sqlCount(ctx) + return withInterceptors[int](ctx, auq, querierCount[*AuthUserQuery](), auq.inters) } // CountX is like Count, but panics if an error occurs. @@ -265,10 +271,15 @@ func (auq *AuthUserQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (auq *AuthUserQuery) Exist(ctx context.Context) (bool, error) { - if err := auq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, auq.ctx, "Exist") + switch _, err := auq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return auq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -288,23 +299,22 @@ func (auq *AuthUserQuery) Clone() *AuthUserQuery { } return &AuthUserQuery{ config: auq.config, - limit: auq.limit, - offset: auq.offset, - order: append([]OrderFunc{}, auq.order...), + ctx: auq.ctx.Clone(), + order: append([]authuser.OrderOption{}, auq.order...), + inters: append([]Interceptor{}, auq.inters...), predicates: append([]predicate.AuthUser{}, auq.predicates...), withAuthUserToToken: auq.withAuthUserToToken.Clone(), withAuthUserToServerTasks: auq.withAuthUserToServerTasks.Clone(), // clone intermediate query. - sql: auq.sql.Clone(), - path: auq.path, - unique: auq.unique, + sql: auq.sql.Clone(), + path: auq.path, } } // WithAuthUserToToken tells the query-builder to eager-load the nodes that are connected to // the "AuthUserToToken" edge. The optional arguments are used to configure the query builder of the edge. func (auq *AuthUserQuery) WithAuthUserToToken(opts ...func(*TokenQuery)) *AuthUserQuery { - query := &TokenQuery{config: auq.config} + query := (&TokenClient{config: auq.config}).Query() for _, opt := range opts { opt(query) } @@ -315,7 +325,7 @@ func (auq *AuthUserQuery) WithAuthUserToToken(opts ...func(*TokenQuery)) *AuthUs // WithAuthUserToServerTasks tells the query-builder to eager-load the nodes that are connected to // the "AuthUserToServerTasks" edge. The optional arguments are used to configure the query builder of the edge. func (auq *AuthUserQuery) WithAuthUserToServerTasks(opts ...func(*ServerTaskQuery)) *AuthUserQuery { - query := &ServerTaskQuery{config: auq.config} + query := (&ServerTaskClient{config: auq.config}).Query() for _, opt := range opts { opt(query) } @@ -337,17 +347,13 @@ func (auq *AuthUserQuery) WithAuthUserToServerTasks(opts ...func(*ServerTaskQuer // GroupBy(authuser.FieldUsername). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (auq *AuthUserQuery) GroupBy(field string, fields ...string) *AuthUserGroupBy { - group := &AuthUserGroupBy{config: auq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := auq.prepareQuery(ctx); err != nil { - return nil, err - } - return auq.sqlQuery(ctx), nil - } - return group + auq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &AuthUserGroupBy{build: auq} + grbuild.flds = &auq.ctx.Fields + grbuild.label = authuser.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -362,14 +368,31 @@ func (auq *AuthUserQuery) GroupBy(field string, fields ...string) *AuthUserGroup // client.AuthUser.Query(). // Select(authuser.FieldUsername). // Scan(ctx, &v) -// func (auq *AuthUserQuery) Select(fields ...string) *AuthUserSelect { - auq.fields = append(auq.fields, fields...) - return &AuthUserSelect{AuthUserQuery: auq} + auq.ctx.Fields = append(auq.ctx.Fields, fields...) + sbuild := &AuthUserSelect{AuthUserQuery: auq} + sbuild.label = authuser.Label + sbuild.flds, sbuild.scan = &auq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AuthUserSelect configured with the given aggregations. +func (auq *AuthUserQuery) Aggregate(fns ...AggregateFunc) *AuthUserSelect { + return auq.Select().Aggregate(fns...) } func (auq *AuthUserQuery) prepareQuery(ctx context.Context) error { - for _, f := range auq.fields { + for _, inter := range auq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, auq); err != nil { + return err + } + } + } + for _, f := range auq.ctx.Fields { if !authuser.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -384,7 +407,7 @@ func (auq *AuthUserQuery) prepareQuery(ctx context.Context) error { return nil } -func (auq *AuthUserQuery) sqlAll(ctx context.Context) ([]*AuthUser, error) { +func (auq *AuthUserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthUser, error) { var ( nodes = []*AuthUser{} _spec = auq.querySpec() @@ -393,121 +416,149 @@ func (auq *AuthUserQuery) sqlAll(ctx context.Context) ([]*AuthUser, error) { auq.withAuthUserToServerTasks != nil, } ) - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AuthUser).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &AuthUser{config: auq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(auq.modifiers) > 0 { + _spec.Modifiers = auq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, auq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := auq.withAuthUserToToken; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*AuthUser) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.AuthUserToToken = []*Token{} - } - query.withFKs = true - query.Where(predicate.Token(func(s *sql.Selector) { - s.Where(sql.InValues(authuser.AuthUserToTokenColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := auq.loadAuthUserToToken(ctx, query, nodes, + func(n *AuthUser) { n.Edges.AuthUserToToken = []*Token{} }, + func(n *AuthUser, e *Token) { n.Edges.AuthUserToToken = append(n.Edges.AuthUserToToken, e) }); err 
!= nil { return nil, err } - for _, n := range neighbors { - fk := n.auth_user_auth_user_to_token - if fk == nil { - return nil, fmt.Errorf(`foreign-key "auth_user_auth_user_to_token" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "auth_user_auth_user_to_token" returned %v for node %v`, *fk, n.ID) - } - node.Edges.AuthUserToToken = append(node.Edges.AuthUserToToken, n) - } } - if query := auq.withAuthUserToServerTasks; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*AuthUser) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.AuthUserToServerTasks = []*ServerTask{} + if err := auq.loadAuthUserToServerTasks(ctx, query, nodes, + func(n *AuthUser) { n.Edges.AuthUserToServerTasks = []*ServerTask{} }, + func(n *AuthUser, e *ServerTask) { + n.Edges.AuthUserToServerTasks = append(n.Edges.AuthUserToServerTasks, e) + }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.InValues(authuser.AuthUserToServerTasksColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range auq.withNamedAuthUserToToken { + if err := auq.loadAuthUserToToken(ctx, query, nodes, + func(n *AuthUser) { n.appendNamedAuthUserToToken(name) }, + func(n *AuthUser, e *Token) { n.appendNamedAuthUserToToken(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.server_task_server_task_to_auth_user - if fk == nil { - return nil, fmt.Errorf(`foreign-key "server_task_server_task_to_auth_user" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_auth_user" returned %v for node %v`, *fk, n.ID) - } - node.Edges.AuthUserToServerTasks = append(node.Edges.AuthUserToServerTasks, n) + } + for name, query := range auq.withNamedAuthUserToServerTasks { + if err := auq.loadAuthUserToServerTasks(ctx, query, nodes, + func(n *AuthUser) { n.appendNamedAuthUserToServerTasks(name) }, + func(n *AuthUser, e *ServerTask) { n.appendNamedAuthUserToServerTasks(name, e) }); err != nil { + return nil, err + } + } + for i := range auq.loadTotal { + if err := auq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (auq *AuthUserQuery) sqlCount(ctx context.Context) (int, error) { - _spec := auq.querySpec() - _spec.Node.Columns = auq.fields - if len(auq.fields) > 0 { - _spec.Unique = auq.unique != nil && *auq.unique +func (auq *AuthUserQuery) loadAuthUserToToken(ctx context.Context, query *TokenQuery, nodes []*AuthUser, init func(*AuthUser), assign func(*AuthUser, *Token)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*AuthUser) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } } - return sqlgraph.CountNodes(ctx, auq.driver, _spec) + query.withFKs = true + query.Where(predicate.Token(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(authuser.AuthUserToTokenColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.auth_user_auth_user_to_token + if fk == nil { + return fmt.Errorf(`foreign-key "auth_user_auth_user_to_token" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected 
referenced foreign-key "auth_user_auth_user_to_token" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -func (auq *AuthUserQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := auq.sqlCount(ctx) +func (auq *AuthUserQuery) loadAuthUserToServerTasks(ctx context.Context, query *ServerTaskQuery, nodes []*AuthUser, init func(*AuthUser), assign func(*AuthUser, *ServerTask)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*AuthUser) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.ServerTask(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(authuser.AuthUserToServerTasksColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + return err + } + for _, n := range neighbors { + fk := n.server_task_server_task_to_auth_user + if fk == nil { + return fmt.Errorf(`foreign-key "server_task_server_task_to_auth_user" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "server_task_server_task_to_auth_user" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return n > 0, nil + return nil +} + +func (auq *AuthUserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := auq.querySpec() + if len(auq.modifiers) > 0 { + _spec.Modifiers = auq.modifiers + } + _spec.Node.Columns = auq.ctx.Fields + if len(auq.ctx.Fields) > 0 { + _spec.Unique = auq.ctx.Unique != nil && *auq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, auq.driver, _spec) } func (auq *AuthUserQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: authuser.Table, - Columns: authuser.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, - }, - From: auq.sql, - Unique: true, - } - if unique := auq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(authuser.Table, authuser.Columns, sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID)) + _spec.From = auq.sql + if unique := auq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if auq.path != nil { + _spec.Unique = true } - if fields := auq.fields; len(fields) > 0 { + if fields := auq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, authuser.FieldID) for i := range fields { @@ -523,10 +574,10 @@ func (auq *AuthUserQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := auq.limit; limit != nil { + if limit := auq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := auq.offset; offset != nil { + if offset := auq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := auq.order; len(ps) > 0 { @@ -542,7 +593,7 @@ func (auq *AuthUserQuery) querySpec() *sqlgraph.QuerySpec { func (auq *AuthUserQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(auq.driver.Dialect()) t1 := builder.Table(authuser.Table) - columns := auq.fields + columns := auq.ctx.Fields if len(columns) == 0 { columns = authuser.Columns } @@ -551,7 +602,7 @@ func (auq *AuthUserQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = auq.sql selector.Select(selector.Columns(columns...)...) 
} - if auq.unique != nil && *auq.unique { + if auq.ctx.Unique != nil && *auq.ctx.Unique { selector.Distinct() } for _, p := range auq.predicates { @@ -560,498 +611,128 @@ func (auq *AuthUserQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range auq.order { p(selector) } - if offset := auq.offset; offset != nil { + if offset := auq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := auq.limit; limit != nil { + if limit := auq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// AuthUserGroupBy is the group-by builder for AuthUser entities. -type AuthUserGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (augb *AuthUserGroupBy) Aggregate(fns ...AggregateFunc) *AuthUserGroupBy { - augb.fns = append(augb.fns, fns...) - return augb -} - -// Scan applies the group-by query and scans the result into the given value. -func (augb *AuthUserGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := augb.path(ctx) - if err != nil { - return err - } - augb.sql = query - return augb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (augb *AuthUserGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := augb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(augb.fields) > 1 { - return nil, errors.New("ent: AuthUserGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := augb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (augb *AuthUserGroupBy) StringsX(ctx context.Context) []string { - v, err := augb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = augb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (augb *AuthUserGroupBy) StringX(ctx context.Context) string { - v, err := augb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(augb.fields) > 1 { - return nil, errors.New("ent: AuthUserGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := augb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. 
-func (augb *AuthUserGroupBy) IntsX(ctx context.Context) []int { - v, err := augb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = augb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (augb *AuthUserGroupBy) IntX(ctx context.Context) int { - v, err := augb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(augb.fields) > 1 { - return nil, errors.New("ent: AuthUserGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := augb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedAuthUserToToken tells the query-builder to eager-load the nodes that are connected to the "AuthUserToToken" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (auq *AuthUserQuery) WithNamedAuthUserToToken(name string, opts ...func(*TokenQuery)) *AuthUserQuery { + query := (&TokenClient{config: auq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (augb *AuthUserGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := augb.Float64s(ctx) - if err != nil { - panic(err) + if auq.withNamedAuthUserToToken == nil { + auq.withNamedAuthUserToToken = make(map[string]*TokenQuery) } - return v + auq.withNamedAuthUserToToken[name] = query + return auq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = augb.Float64s(ctx); err != nil { - return +// WithNamedAuthUserToServerTasks tells the query-builder to eager-load the nodes that are connected to the "AuthUserToServerTasks" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (auq *AuthUserQuery) WithNamedAuthUserToServerTasks(name string, opts ...func(*ServerTaskQuery)) *AuthUserQuery { + query := (&ServerTaskClient{config: auq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserGroupBy.Float64s returned %d results when one was expected", len(v)) + if auq.withNamedAuthUserToServerTasks == nil { + auq.withNamedAuthUserToServerTasks = make(map[string]*ServerTaskQuery) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (augb *AuthUserGroupBy) Float64X(ctx context.Context) float64 { - v, err := augb.Float64(ctx) - if err != nil { - panic(err) - } - return v + auq.withNamedAuthUserToServerTasks[name] = query + return auq } -// Bools returns list of bools from group-by. 
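The WithNamedAuthUserToToken and WithNamedAuthUserToServerTasks builders added above let the same edge be eager-loaded several times under different names, each with its own sub-query. A minimal usage sketch, illustrative only and not part of the generated code, assuming a generated *ent.Client in client, the usual context and github.com/gen0cide/laforge/ent imports, and a NamedAuthUserToToken accessor on *ent.AuthUser (assumed by analogy with the Named* accessors generated for Build later in this diff):

// Eager-load the AuthUserToToken edge twice under different names,
// each with its own options, then read one named result back.
func loadUsersWithNamedTokens(ctx context.Context, client *ent.Client) error {
    users, err := client.AuthUser.Query().
        WithNamedAuthUserToToken("all").
        WithNamedAuthUserToToken("recent", func(q *ent.TokenQuery) {
            q.Limit(5) // any TokenQuery option can be applied per named load
        }).
        All(ctx)
    if err != nil {
        return err
    }
    for _, u := range users {
        // NamedAuthUserToToken is assumed to be generated in ent/authuser.go,
        // mirroring the appendNamedAuthUserToToken helper used by the query loader above.
        recent, err := u.NamedAuthUserToToken("recent")
        if err != nil {
            return err
        }
        _ = recent
    }
    return nil
}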
-// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(augb.fields) > 1 { - return nil, errors.New("ent: AuthUserGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := augb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// AuthUserGroupBy is the group-by builder for AuthUser entities. +type AuthUserGroupBy struct { + selector + build *AuthUserQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (augb *AuthUserGroupBy) BoolsX(ctx context.Context) []bool { - v, err := augb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (augb *AuthUserGroupBy) Aggregate(fns ...AggregateFunc) *AuthUserGroupBy { + augb.fns = append(augb.fns, fns...) + return augb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (augb *AuthUserGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = augb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (augb *AuthUserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, augb.build.ctx, "GroupBy") + if err := augb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*AuthUserQuery, *AuthUserGroupBy](ctx, augb.build, augb, augb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (augb *AuthUserGroupBy) BoolX(ctx context.Context) bool { - v, err := augb.Bool(ctx) - if err != nil { - panic(err) +func (augb *AuthUserGroupBy) sqlScan(ctx context.Context, root *AuthUserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(augb.fns)) + for _, fn := range augb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (augb *AuthUserGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range augb.fields { - if !authuser.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*augb.flds)+len(augb.fns)) + for _, f := range *augb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := augb.sqlQuery() + selector.GroupBy(selector.Columns(*augb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := augb.driver.Query(ctx, query, args, rows); err != nil { + if err := augb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (augb *AuthUserGroupBy) sqlQuery() *sql.Selector { - selector := augb.sql.Select() - aggregation := make([]string, 0, len(augb.fns)) - for _, fn := range augb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(augb.fields)+len(augb.fns)) - for _, f := range augb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(augb.fields...)...) -} - // AuthUserSelect is the builder for selecting fields of AuthUser entities. type AuthUserSelect struct { *AuthUserQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (aus *AuthUserSelect) Aggregate(fns ...AggregateFunc) *AuthUserSelect { + aus.fns = append(aus.fns, fns...) + return aus } // Scan applies the selector query and scans the result into the given value. -func (aus *AuthUserSelect) Scan(ctx context.Context, v interface{}) error { +func (aus *AuthUserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, aus.ctx, "Select") if err := aus.prepareQuery(ctx); err != nil { return err } - aus.sql = aus.AuthUserQuery.sqlQuery(ctx) - return aus.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (aus *AuthUserSelect) ScanX(ctx context.Context, v interface{}) { - if err := aus.Scan(ctx, v); err != nil { - panic(err) - } + return scanWithInterceptors[*AuthUserQuery, *AuthUserSelect](ctx, aus.AuthUserQuery, aus, aus.inters, v) } -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) Strings(ctx context.Context) ([]string, error) { - if len(aus.fields) > 1 { - return nil, errors.New("ent: AuthUserSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := aus.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (aus *AuthUserSelect) StringsX(ctx context.Context) []string { - v, err := aus.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = aus.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (aus *AuthUserSelect) StringX(ctx context.Context) string { - v, err := aus.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) Ints(ctx context.Context) ([]int, error) { - if len(aus.fields) > 1 { - return nil, errors.New("ent: AuthUserSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := aus.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (aus *AuthUserSelect) IntsX(ctx context.Context) []int { - v, err := aus.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. 
-func (aus *AuthUserSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = aus.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (aus *AuthUserSelect) IntX(ctx context.Context) int { - v, err := aus.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(aus.fields) > 1 { - return nil, errors.New("ent: AuthUserSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := aus.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (aus *AuthUserSelect) Float64sX(ctx context.Context) []float64 { - v, err := aus.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = aus.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (aus *AuthUserSelect) Float64X(ctx context.Context) float64 { - v, err := aus.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) Bools(ctx context.Context) ([]bool, error) { - if len(aus.fields) > 1 { - return nil, errors.New("ent: AuthUserSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := aus.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (aus *AuthUserSelect) BoolsX(ctx context.Context) []bool { - v, err := aus.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (aus *AuthUserSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = aus.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{authuser.Label} - default: - err = fmt.Errorf("ent: AuthUserSelect.Bools returned %d results when one was expected", len(v)) +func (aus *AuthUserSelect) sqlScan(ctx context.Context, root *AuthUserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(aus.fns)) + for _, fn := range aus.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (aus *AuthUserSelect) BoolX(ctx context.Context) bool { - v, err := aus.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*aus.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) 
+ case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (aus *AuthUserSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := aus.sql.Query() + query, args := selector.Query() if err := aus.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/authuser_update.go b/ent/authuser_update.go index b922bcd6..58099275 100755 --- a/ent/authuser_update.go +++ b/ent/authuser_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -36,12 +36,28 @@ func (auu *AuthUserUpdate) SetUsername(s string) *AuthUserUpdate { return auu } +// SetNillableUsername sets the "username" field if the given value is not nil. +func (auu *AuthUserUpdate) SetNillableUsername(s *string) *AuthUserUpdate { + if s != nil { + auu.SetUsername(*s) + } + return auu +} + // SetPassword sets the "password" field. func (auu *AuthUserUpdate) SetPassword(s string) *AuthUserUpdate { auu.mutation.SetPassword(s) return auu } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (auu *AuthUserUpdate) SetNillablePassword(s *string) *AuthUserUpdate { + if s != nil { + auu.SetPassword(*s) + } + return auu +} + // SetFirstName sets the "first_name" field. func (auu *AuthUserUpdate) SetFirstName(s string) *AuthUserUpdate { auu.mutation.SetFirstName(s) @@ -146,12 +162,28 @@ func (auu *AuthUserUpdate) SetRole(a authuser.Role) *AuthUserUpdate { return auu } +// SetNillableRole sets the "role" field if the given value is not nil. +func (auu *AuthUserUpdate) SetNillableRole(a *authuser.Role) *AuthUserUpdate { + if a != nil { + auu.SetRole(*a) + } + return auu +} + // SetProvider sets the "provider" field. func (auu *AuthUserUpdate) SetProvider(a authuser.Provider) *AuthUserUpdate { auu.mutation.SetProvider(a) return auu } +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (auu *AuthUserUpdate) SetNillableProvider(a *authuser.Provider) *AuthUserUpdate { + if a != nil { + auu.SetProvider(*a) + } + return auu +} + // AddAuthUserToTokenIDs adds the "AuthUserToToken" edge to the Token entity by IDs. func (auu *AuthUserUpdate) AddAuthUserToTokenIDs(ids ...uuid.UUID) *AuthUserUpdate { auu.mutation.AddAuthUserToTokenIDs(ids...) @@ -231,40 +263,7 @@ func (auu *AuthUserUpdate) RemoveAuthUserToServerTasks(s ...*ServerTask) *AuthUs // Save executes the query and returns the number of nodes affected by the update operation. 
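The SetNillable* setters added to AuthUserUpdate above make partial updates straightforward when the caller only holds optional pointer inputs (for example a PATCH-style request body): nil values are simply skipped. A minimal sketch, illustrative only, assuming a generated *ent.Client plus the github.com/google/uuid and github.com/gen0cide/laforge/ent/authuser imports; UserPatch is a hypothetical caller-side struct:

// UserPatch carries optional fields; only non-nil ones are written.
type UserPatch struct {
    Username *string
    Password *string
    Role     *authuser.Role
}

func applyUserPatch(ctx context.Context, client *ent.Client, id uuid.UUID, p UserPatch) (int, error) {
    return client.AuthUser.Update().
        Where(authuser.IDEQ(id)).
        SetNillableUsername(p.Username).
        SetNillablePassword(p.Password).
        SetNillableRole(p.Role).
        Save(ctx) // returns the number of rows affected
}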
func (auu *AuthUserUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(auu.hooks) == 0 { - if err = auu.check(); err != nil { - return 0, err - } - affected, err = auu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AuthUserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = auu.check(); err != nil { - return 0, err - } - auu.mutation = mutation - affected, err = auu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(auu.hooks) - 1; i >= 0; i-- { - if auu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = auu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, auu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, auu.sqlSave, auu.mutation, auu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -305,16 +304,10 @@ func (auu *AuthUserUpdate) check() error { } func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: authuser.Table, - Columns: authuser.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, - }, + if err := auu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(authuser.Table, authuser.Columns, sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID)) if ps := auu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -323,81 +316,37 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := auu.mutation.Username(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldUsername, - }) + _spec.SetField(authuser.FieldUsername, field.TypeString, value) } if value, ok := auu.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPassword, - }) + _spec.SetField(authuser.FieldPassword, field.TypeString, value) } if value, ok := auu.mutation.FirstName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldFirstName, - }) + _spec.SetField(authuser.FieldFirstName, field.TypeString, value) } if value, ok := auu.mutation.LastName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldLastName, - }) + _spec.SetField(authuser.FieldLastName, field.TypeString, value) } if value, ok := auu.mutation.Email(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldEmail, - }) + _spec.SetField(authuser.FieldEmail, field.TypeString, value) } if value, ok := auu.mutation.Phone(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPhone, - }) + _spec.SetField(authuser.FieldPhone, field.TypeString, value) } if value, ok := auu.mutation.Company(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldCompany, - }) + 
_spec.SetField(authuser.FieldCompany, field.TypeString, value) } if value, ok := auu.mutation.Occupation(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldOccupation, - }) + _spec.SetField(authuser.FieldOccupation, field.TypeString, value) } if value, ok := auu.mutation.PrivateKeyPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPrivateKeyPath, - }) + _spec.SetField(authuser.FieldPrivateKeyPath, field.TypeString, value) } if value, ok := auu.mutation.Role(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: authuser.FieldRole, - }) + _spec.SetField(authuser.FieldRole, field.TypeEnum, value) } if value, ok := auu.mutation.Provider(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: authuser.FieldProvider, - }) + _spec.SetField(authuser.FieldProvider, field.TypeEnum, value) } if auu.mutation.AuthUserToTokenCleared() { edge := &sqlgraph.EdgeSpec{ @@ -407,10 +356,7 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -423,10 +369,7 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -442,10 +385,7 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -461,10 +401,7 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -477,10 +414,7 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -496,10 +430,7 @@ func (auu *AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -511,10 +442,11 @@ func (auu 
*AuthUserUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{authuser.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + auu.mutation.done = true return n, nil } @@ -532,12 +464,28 @@ func (auuo *AuthUserUpdateOne) SetUsername(s string) *AuthUserUpdateOne { return auuo } +// SetNillableUsername sets the "username" field if the given value is not nil. +func (auuo *AuthUserUpdateOne) SetNillableUsername(s *string) *AuthUserUpdateOne { + if s != nil { + auuo.SetUsername(*s) + } + return auuo +} + // SetPassword sets the "password" field. func (auuo *AuthUserUpdateOne) SetPassword(s string) *AuthUserUpdateOne { auuo.mutation.SetPassword(s) return auuo } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (auuo *AuthUserUpdateOne) SetNillablePassword(s *string) *AuthUserUpdateOne { + if s != nil { + auuo.SetPassword(*s) + } + return auuo +} + // SetFirstName sets the "first_name" field. func (auuo *AuthUserUpdateOne) SetFirstName(s string) *AuthUserUpdateOne { auuo.mutation.SetFirstName(s) @@ -642,12 +590,28 @@ func (auuo *AuthUserUpdateOne) SetRole(a authuser.Role) *AuthUserUpdateOne { return auuo } +// SetNillableRole sets the "role" field if the given value is not nil. +func (auuo *AuthUserUpdateOne) SetNillableRole(a *authuser.Role) *AuthUserUpdateOne { + if a != nil { + auuo.SetRole(*a) + } + return auuo +} + // SetProvider sets the "provider" field. func (auuo *AuthUserUpdateOne) SetProvider(a authuser.Provider) *AuthUserUpdateOne { auuo.mutation.SetProvider(a) return auuo } +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (auuo *AuthUserUpdateOne) SetNillableProvider(a *authuser.Provider) *AuthUserUpdateOne { + if a != nil { + auuo.SetProvider(*a) + } + return auuo +} + // AddAuthUserToTokenIDs adds the "AuthUserToToken" edge to the Token entity by IDs. func (auuo *AuthUserUpdateOne) AddAuthUserToTokenIDs(ids ...uuid.UUID) *AuthUserUpdateOne { auuo.mutation.AddAuthUserToTokenIDs(ids...) @@ -725,6 +689,12 @@ func (auuo *AuthUserUpdateOne) RemoveAuthUserToServerTasks(s ...*ServerTask) *Au return auuo.RemoveAuthUserToServerTaskIDs(ids...) } +// Where appends a list predicates to the AuthUserUpdate builder. +func (auuo *AuthUserUpdateOne) Where(ps ...predicate.AuthUser) *AuthUserUpdateOne { + auuo.mutation.Where(ps...) + return auuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (auuo *AuthUserUpdateOne) Select(field string, fields ...string) *AuthUserUpdateOne { @@ -734,40 +704,7 @@ func (auuo *AuthUserUpdateOne) Select(field string, fields ...string) *AuthUserU // Save executes the query and returns the updated AuthUser entity. 
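AuthUserUpdateOne gains the same SetNillable* setters plus a Where method, so a single-row update can now carry an extra guard predicate; when the predicate filters the row out, sqlSave reports it as a NotFoundError (see the error handling further down). A minimal sketch, illustrative only, assuming a generated *ent.Client and the github.com/gen0cide/laforge/ent/authuser predicate package:

// Rename the user only if its current username still matches oldName.
func renameGuarded(ctx context.Context, client *ent.Client, id uuid.UUID, oldName, newName string) (*ent.AuthUser, error) {
    return client.AuthUser.UpdateOneID(id).
        Where(authuser.UsernameEQ(oldName)). // guard predicate appended to the ID match
        SetUsername(newName).
        Save(ctx)
}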
func (auuo *AuthUserUpdateOne) Save(ctx context.Context) (*AuthUser, error) { - var ( - err error - node *AuthUser - ) - if len(auuo.hooks) == 0 { - if err = auuo.check(); err != nil { - return nil, err - } - node, err = auuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AuthUserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = auuo.check(); err != nil { - return nil, err - } - auuo.mutation = mutation - node, err = auuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(auuo.hooks) - 1; i >= 0; i-- { - if auuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = auuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, auuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, auuo.sqlSave, auuo.mutation, auuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -808,16 +745,10 @@ func (auuo *AuthUserUpdateOne) check() error { } func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: authuser.Table, - Columns: authuser.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, - }, + if err := auuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(authuser.Table, authuser.Columns, sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID)) id, ok := auuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthUser.id" for update`)} @@ -843,81 +774,37 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er } } if value, ok := auuo.mutation.Username(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldUsername, - }) + _spec.SetField(authuser.FieldUsername, field.TypeString, value) } if value, ok := auuo.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPassword, - }) + _spec.SetField(authuser.FieldPassword, field.TypeString, value) } if value, ok := auuo.mutation.FirstName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldFirstName, - }) + _spec.SetField(authuser.FieldFirstName, field.TypeString, value) } if value, ok := auuo.mutation.LastName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldLastName, - }) + _spec.SetField(authuser.FieldLastName, field.TypeString, value) } if value, ok := auuo.mutation.Email(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldEmail, - }) + _spec.SetField(authuser.FieldEmail, field.TypeString, value) } if value, ok := auuo.mutation.Phone(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPhone, - }) + _spec.SetField(authuser.FieldPhone, field.TypeString, value) } if value, ok := auuo.mutation.Company(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - 
Column: authuser.FieldCompany, - }) + _spec.SetField(authuser.FieldCompany, field.TypeString, value) } if value, ok := auuo.mutation.Occupation(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldOccupation, - }) + _spec.SetField(authuser.FieldOccupation, field.TypeString, value) } if value, ok := auuo.mutation.PrivateKeyPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: authuser.FieldPrivateKeyPath, - }) + _spec.SetField(authuser.FieldPrivateKeyPath, field.TypeString, value) } if value, ok := auuo.mutation.Role(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: authuser.FieldRole, - }) + _spec.SetField(authuser.FieldRole, field.TypeEnum, value) } if value, ok := auuo.mutation.Provider(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: authuser.FieldProvider, - }) + _spec.SetField(authuser.FieldProvider, field.TypeEnum, value) } if auuo.mutation.AuthUserToTokenCleared() { edge := &sqlgraph.EdgeSpec{ @@ -927,10 +814,7 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -943,10 +827,7 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -962,10 +843,7 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er Columns: []string{authuser.AuthUserToTokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -981,10 +859,7 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -997,10 +872,7 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1016,10 +888,7 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er Columns: []string{authuser.AuthUserToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, 
field.TypeUUID), }, } for _, k := range nodes { @@ -1034,9 +903,10 @@ func (auuo *AuthUserUpdateOne) sqlSave(ctx context.Context) (_node *AuthUser, er if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{authuser.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + auuo.mutation.done = true return _node, nil } diff --git a/ent/build.go b/ent/build.go index 06b153a0..19d859a6 100755 --- a/ent/build.go +++ b/ent/build.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/build" "github.com/gen0cide/laforge/ent/buildcommit" @@ -34,6 +35,7 @@ type Build struct { // The values are being populated by the BuildQuery when eager-loading is set. Edges BuildEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // BuildToStatus holds the value of the BuildToStatus edge. HCLBuildToStatus *Status `json:"BuildToStatus,omitempty"` @@ -59,11 +61,12 @@ type Build struct { HCLBuildToAgentStatuses []*AgentStatus `json:"BuildToAgentStatuses,omitempty"` // BuildToServerTasks holds the value of the BuildToServerTasks edge. HCLBuildToServerTasks []*ServerTask `json:"BuildToServerTasks,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ build_build_to_environment *uuid.UUID build_build_to_competition *uuid.UUID build_build_to_latest_build_commit *uuid.UUID build_build_to_repo_commit *uuid.UUID + selectValues sql.SelectValues } // BuildEdges holds the relations/edges for other nodes in the graph. @@ -95,6 +98,16 @@ type BuildEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [12]bool + // totalCount holds the count of the edges above. + totalCount [12]map[string]int + + namedBuildToProvisionedNetwork map[string][]*ProvisionedNetwork + namedBuildToTeam map[string][]*Team + namedBuildToPlan map[string][]*Plan + namedBuildToBuildCommits map[string][]*BuildCommit + namedBuildToAdhocPlans map[string][]*AdhocPlan + namedBuildToAgentStatuses map[string][]*AgentStatus + namedBuildToServerTasks map[string][]*ServerTask } // BuildToStatusOrErr returns the BuildToStatus value or an error if the edge @@ -102,8 +115,7 @@ type BuildEdges struct { func (e BuildEdges) BuildToStatusOrErr() (*Status, error) { if e.loadedTypes[0] { if e.BuildToStatus == nil { - // The edge BuildToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.BuildToStatus, nil @@ -116,8 +128,7 @@ func (e BuildEdges) BuildToStatusOrErr() (*Status, error) { func (e BuildEdges) BuildToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[1] { if e.BuildToEnvironment == nil { - // The edge BuildToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.BuildToEnvironment, nil @@ -130,8 +141,7 @@ func (e BuildEdges) BuildToEnvironmentOrErr() (*Environment, error) { func (e BuildEdges) BuildToCompetitionOrErr() (*Competition, error) { if e.loadedTypes[2] { if e.BuildToCompetition == nil { - // The edge BuildToCompetition was loaded in eager-loading, - // but was not found. 
+ // Edge was loaded but was not found. return nil, &NotFoundError{label: competition.Label} } return e.BuildToCompetition, nil @@ -144,8 +154,7 @@ func (e BuildEdges) BuildToCompetitionOrErr() (*Competition, error) { func (e BuildEdges) BuildToLatestBuildCommitOrErr() (*BuildCommit, error) { if e.loadedTypes[3] { if e.BuildToLatestBuildCommit == nil { - // The edge BuildToLatestBuildCommit was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: buildcommit.Label} } return e.BuildToLatestBuildCommit, nil @@ -158,8 +167,7 @@ func (e BuildEdges) BuildToLatestBuildCommitOrErr() (*BuildCommit, error) { func (e BuildEdges) BuildToRepoCommitOrErr() (*RepoCommit, error) { if e.loadedTypes[4] { if e.BuildToRepoCommit == nil { - // The edge BuildToRepoCommit was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: repocommit.Label} } return e.BuildToRepoCommit, nil @@ -231,8 +239,8 @@ func (e BuildEdges) BuildToServerTasksOrErr() ([]*ServerTask, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Build) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Build) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case build.FieldVars: @@ -252,7 +260,7 @@ func (*Build) scanValues(columns []string) ([]interface{}, error) { case build.ForeignKeys[3]: // build_build_to_repo_commit values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Build", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -260,7 +268,7 @@ func (*Build) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Build fields. -func (b *Build) assignValues(columns []string, values []interface{}) error { +func (b *Build) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -326,86 +334,94 @@ func (b *Build) assignValues(columns []string, values []interface{}) error { b.build_build_to_repo_commit = new(uuid.UUID) *b.build_build_to_repo_commit = *value.S.(*uuid.UUID) } + default: + b.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Build. +// This includes values selected through modifiers, order, etc. +func (b *Build) Value(name string) (ent.Value, error) { + return b.selectValues.Get(name) +} + // QueryBuildToStatus queries the "BuildToStatus" edge of the Build entity. func (b *Build) QueryBuildToStatus() *StatusQuery { - return (&BuildClient{config: b.config}).QueryBuildToStatus(b) + return NewBuildClient(b.config).QueryBuildToStatus(b) } // QueryBuildToEnvironment queries the "BuildToEnvironment" edge of the Build entity. func (b *Build) QueryBuildToEnvironment() *EnvironmentQuery { - return (&BuildClient{config: b.config}).QueryBuildToEnvironment(b) + return NewBuildClient(b.config).QueryBuildToEnvironment(b) } // QueryBuildToCompetition queries the "BuildToCompetition" edge of the Build entity. 
func (b *Build) QueryBuildToCompetition() *CompetitionQuery { - return (&BuildClient{config: b.config}).QueryBuildToCompetition(b) + return NewBuildClient(b.config).QueryBuildToCompetition(b) } // QueryBuildToLatestBuildCommit queries the "BuildToLatestBuildCommit" edge of the Build entity. func (b *Build) QueryBuildToLatestBuildCommit() *BuildCommitQuery { - return (&BuildClient{config: b.config}).QueryBuildToLatestBuildCommit(b) + return NewBuildClient(b.config).QueryBuildToLatestBuildCommit(b) } // QueryBuildToRepoCommit queries the "BuildToRepoCommit" edge of the Build entity. func (b *Build) QueryBuildToRepoCommit() *RepoCommitQuery { - return (&BuildClient{config: b.config}).QueryBuildToRepoCommit(b) + return NewBuildClient(b.config).QueryBuildToRepoCommit(b) } // QueryBuildToProvisionedNetwork queries the "BuildToProvisionedNetwork" edge of the Build entity. func (b *Build) QueryBuildToProvisionedNetwork() *ProvisionedNetworkQuery { - return (&BuildClient{config: b.config}).QueryBuildToProvisionedNetwork(b) + return NewBuildClient(b.config).QueryBuildToProvisionedNetwork(b) } // QueryBuildToTeam queries the "BuildToTeam" edge of the Build entity. func (b *Build) QueryBuildToTeam() *TeamQuery { - return (&BuildClient{config: b.config}).QueryBuildToTeam(b) + return NewBuildClient(b.config).QueryBuildToTeam(b) } // QueryBuildToPlan queries the "BuildToPlan" edge of the Build entity. func (b *Build) QueryBuildToPlan() *PlanQuery { - return (&BuildClient{config: b.config}).QueryBuildToPlan(b) + return NewBuildClient(b.config).QueryBuildToPlan(b) } // QueryBuildToBuildCommits queries the "BuildToBuildCommits" edge of the Build entity. func (b *Build) QueryBuildToBuildCommits() *BuildCommitQuery { - return (&BuildClient{config: b.config}).QueryBuildToBuildCommits(b) + return NewBuildClient(b.config).QueryBuildToBuildCommits(b) } // QueryBuildToAdhocPlans queries the "BuildToAdhocPlans" edge of the Build entity. func (b *Build) QueryBuildToAdhocPlans() *AdhocPlanQuery { - return (&BuildClient{config: b.config}).QueryBuildToAdhocPlans(b) + return NewBuildClient(b.config).QueryBuildToAdhocPlans(b) } // QueryBuildToAgentStatuses queries the "BuildToAgentStatuses" edge of the Build entity. func (b *Build) QueryBuildToAgentStatuses() *AgentStatusQuery { - return (&BuildClient{config: b.config}).QueryBuildToAgentStatuses(b) + return NewBuildClient(b.config).QueryBuildToAgentStatuses(b) } // QueryBuildToServerTasks queries the "BuildToServerTasks" edge of the Build entity. func (b *Build) QueryBuildToServerTasks() *ServerTaskQuery { - return (&BuildClient{config: b.config}).QueryBuildToServerTasks(b) + return NewBuildClient(b.config).QueryBuildToServerTasks(b) } // Update returns a builder for updating this Build. // Note that you need to call Build.Unwrap() before calling this method if this Build // was returned from a transaction, and the transaction was committed or rolled back. func (b *Build) Update() *BuildUpdateOne { - return (&BuildClient{config: b.config}).UpdateOne(b) + return NewBuildClient(b.config).UpdateOne(b) } // Unwrap unwraps the Build entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (b *Build) Unwrap() *Build { - tx, ok := b.config.driver.(*txDriver) + _tx, ok := b.config.driver.(*txDriver) if !ok { panic("ent: Build is not a transactional entity") } - b.config.driver = tx.drv + b.config.driver = _tx.drv return b } @@ -413,24 +429,189 @@ func (b *Build) Unwrap() *Build { func (b *Build) String() string { var builder strings.Builder builder.WriteString("Build(") - builder.WriteString(fmt.Sprintf("id=%v", b.ID)) - builder.WriteString(", revision=") + builder.WriteString(fmt.Sprintf("id=%v, ", b.ID)) + builder.WriteString("revision=") builder.WriteString(fmt.Sprintf("%v", b.Revision)) - builder.WriteString(", environment_revision=") + builder.WriteString(", ") + builder.WriteString("environment_revision=") builder.WriteString(fmt.Sprintf("%v", b.EnvironmentRevision)) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", b.Vars)) - builder.WriteString(", completed_plan=") + builder.WriteString(", ") + builder.WriteString("completed_plan=") builder.WriteString(fmt.Sprintf("%v", b.CompletedPlan)) builder.WriteByte(')') return builder.String() } -// Builds is a parsable slice of Build. -type Builds []*Build +// NamedBuildToProvisionedNetwork returns the BuildToProvisionedNetwork named value or an error if the edge was not +// loaded in eager-loading with this name. +func (b *Build) NamedBuildToProvisionedNetwork(name string) ([]*ProvisionedNetwork, error) { + if b.Edges.namedBuildToProvisionedNetwork == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToProvisionedNetwork[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (b *Build) appendNamedBuildToProvisionedNetwork(name string, edges ...*ProvisionedNetwork) { + if b.Edges.namedBuildToProvisionedNetwork == nil { + b.Edges.namedBuildToProvisionedNetwork = make(map[string][]*ProvisionedNetwork) + } + if len(edges) == 0 { + b.Edges.namedBuildToProvisionedNetwork[name] = []*ProvisionedNetwork{} + } else { + b.Edges.namedBuildToProvisionedNetwork[name] = append(b.Edges.namedBuildToProvisionedNetwork[name], edges...) + } +} + +// NamedBuildToTeam returns the BuildToTeam named value or an error if the edge was not +// loaded in eager-loading with this name. +func (b *Build) NamedBuildToTeam(name string) ([]*Team, error) { + if b.Edges.namedBuildToTeam == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToTeam[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (b *Build) appendNamedBuildToTeam(name string, edges ...*Team) { + if b.Edges.namedBuildToTeam == nil { + b.Edges.namedBuildToTeam = make(map[string][]*Team) + } + if len(edges) == 0 { + b.Edges.namedBuildToTeam[name] = []*Team{} + } else { + b.Edges.namedBuildToTeam[name] = append(b.Edges.namedBuildToTeam[name], edges...) + } +} + +// NamedBuildToPlan returns the BuildToPlan named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (b *Build) NamedBuildToPlan(name string) ([]*Plan, error) { + if b.Edges.namedBuildToPlan == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToPlan[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (b *Build) appendNamedBuildToPlan(name string, edges ...*Plan) { + if b.Edges.namedBuildToPlan == nil { + b.Edges.namedBuildToPlan = make(map[string][]*Plan) + } + if len(edges) == 0 { + b.Edges.namedBuildToPlan[name] = []*Plan{} + } else { + b.Edges.namedBuildToPlan[name] = append(b.Edges.namedBuildToPlan[name], edges...) + } +} + +// NamedBuildToBuildCommits returns the BuildToBuildCommits named value or an error if the edge was not +// loaded in eager-loading with this name. +func (b *Build) NamedBuildToBuildCommits(name string) ([]*BuildCommit, error) { + if b.Edges.namedBuildToBuildCommits == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToBuildCommits[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (b *Build) appendNamedBuildToBuildCommits(name string, edges ...*BuildCommit) { + if b.Edges.namedBuildToBuildCommits == nil { + b.Edges.namedBuildToBuildCommits = make(map[string][]*BuildCommit) + } + if len(edges) == 0 { + b.Edges.namedBuildToBuildCommits[name] = []*BuildCommit{} + } else { + b.Edges.namedBuildToBuildCommits[name] = append(b.Edges.namedBuildToBuildCommits[name], edges...) + } +} + +// NamedBuildToAdhocPlans returns the BuildToAdhocPlans named value or an error if the edge was not +// loaded in eager-loading with this name. +func (b *Build) NamedBuildToAdhocPlans(name string) ([]*AdhocPlan, error) { + if b.Edges.namedBuildToAdhocPlans == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToAdhocPlans[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (b *Build) appendNamedBuildToAdhocPlans(name string, edges ...*AdhocPlan) { + if b.Edges.namedBuildToAdhocPlans == nil { + b.Edges.namedBuildToAdhocPlans = make(map[string][]*AdhocPlan) + } + if len(edges) == 0 { + b.Edges.namedBuildToAdhocPlans[name] = []*AdhocPlan{} + } else { + b.Edges.namedBuildToAdhocPlans[name] = append(b.Edges.namedBuildToAdhocPlans[name], edges...) + } +} + +// NamedBuildToAgentStatuses returns the BuildToAgentStatuses named value or an error if the edge was not +// loaded in eager-loading with this name. +func (b *Build) NamedBuildToAgentStatuses(name string) ([]*AgentStatus, error) { + if b.Edges.namedBuildToAgentStatuses == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToAgentStatuses[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (b Builds) config(cfg config) { - for _i := range b { - b[_i].config = cfg +func (b *Build) appendNamedBuildToAgentStatuses(name string, edges ...*AgentStatus) { + if b.Edges.namedBuildToAgentStatuses == nil { + b.Edges.namedBuildToAgentStatuses = make(map[string][]*AgentStatus) + } + if len(edges) == 0 { + b.Edges.namedBuildToAgentStatuses[name] = []*AgentStatus{} + } else { + b.Edges.namedBuildToAgentStatuses[name] = append(b.Edges.namedBuildToAgentStatuses[name], edges...) } } + +// NamedBuildToServerTasks returns the BuildToServerTasks named value or an error if the edge was not +// loaded in eager-loading with this name. 
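These Named* accessors on Build are the read side of the named eager-loading shown earlier for AuthUser; the corresponding WithNamedBuildToTeam builder is assumed to be generated on *ent.BuildQuery in ent/build_query.go, outside this hunk. A minimal sketch, illustrative only, assuming a generated *ent.Client:

// Load one build with a named, limited view of its BuildToTeam edge.
func firstTeamOfBuild(ctx context.Context, client *ent.Client) ([]*ent.Team, error) {
    b, err := client.Build.Query().
        WithNamedBuildToTeam("firstTeam", func(q *ent.TeamQuery) {
            q.Limit(1)
        }).
        First(ctx)
    if err != nil {
        return nil, err
    }
    // Returns a NotLoadedError if no edge was loaded under this name.
    return b.NamedBuildToTeam("firstTeam")
}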
+func (b *Build) NamedBuildToServerTasks(name string) ([]*ServerTask, error) { + if b.Edges.namedBuildToServerTasks == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := b.Edges.namedBuildToServerTasks[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (b *Build) appendNamedBuildToServerTasks(name string, edges ...*ServerTask) { + if b.Edges.namedBuildToServerTasks == nil { + b.Edges.namedBuildToServerTasks = make(map[string][]*ServerTask) + } + if len(edges) == 0 { + b.Edges.namedBuildToServerTasks[name] = []*ServerTask{} + } else { + b.Edges.namedBuildToServerTasks[name] = append(b.Edges.namedBuildToServerTasks[name], edges...) + } +} + +// Builds is a parsable slice of Build. +type Builds []*Build diff --git a/ent/build/build.go b/ent/build/build.go index 7488c98b..924b806c 100755 --- a/ent/build/build.go +++ b/ent/build/build.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package build import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -170,3 +172,243 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Build queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByRevision orders the results by the revision field. +func ByRevision(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevision, opts...).ToFunc() +} + +// ByEnvironmentRevision orders the results by the environment_revision field. +func ByEnvironmentRevision(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnvironmentRevision, opts...).ToFunc() +} + +// ByCompletedPlan orders the results by the completed_plan field. +func ByCompletedPlan(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCompletedPlan, opts...).ToFunc() +} + +// ByBuildToStatusField orders the results by BuildToStatus field. +func ByBuildToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByBuildToEnvironmentField orders the results by BuildToEnvironment field. +func ByBuildToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByBuildToCompetitionField orders the results by BuildToCompetition field. +func ByBuildToCompetitionField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToCompetitionStep(), sql.OrderByField(field, opts...)) + } +} + +// ByBuildToLatestBuildCommitField orders the results by BuildToLatestBuildCommit field. +func ByBuildToLatestBuildCommitField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToLatestBuildCommitStep(), sql.OrderByField(field, opts...)) + } +} + +// ByBuildToRepoCommitField orders the results by BuildToRepoCommit field. 
+func ByBuildToRepoCommitField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToRepoCommitStep(), sql.OrderByField(field, opts...)) + } +} + +// ByBuildToProvisionedNetworkCount orders the results by BuildToProvisionedNetwork count. +func ByBuildToProvisionedNetworkCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToProvisionedNetworkStep(), opts...) + } +} + +// ByBuildToProvisionedNetwork orders the results by BuildToProvisionedNetwork terms. +func ByBuildToProvisionedNetwork(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToProvisionedNetworkStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildToTeamCount orders the results by BuildToTeam count. +func ByBuildToTeamCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToTeamStep(), opts...) + } +} + +// ByBuildToTeam orders the results by BuildToTeam terms. +func ByBuildToTeam(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToTeamStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildToPlanCount orders the results by BuildToPlan count. +func ByBuildToPlanCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToPlanStep(), opts...) + } +} + +// ByBuildToPlan orders the results by BuildToPlan terms. +func ByBuildToPlan(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToPlanStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildToBuildCommitsCount orders the results by BuildToBuildCommits count. +func ByBuildToBuildCommitsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToBuildCommitsStep(), opts...) + } +} + +// ByBuildToBuildCommits orders the results by BuildToBuildCommits terms. +func ByBuildToBuildCommits(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToBuildCommitsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildToAdhocPlansCount orders the results by BuildToAdhocPlans count. +func ByBuildToAdhocPlansCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToAdhocPlansStep(), opts...) + } +} + +// ByBuildToAdhocPlans orders the results by BuildToAdhocPlans terms. +func ByBuildToAdhocPlans(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToAdhocPlansStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildToAgentStatusesCount orders the results by BuildToAgentStatuses count. +func ByBuildToAgentStatusesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToAgentStatusesStep(), opts...) + } +} + +// ByBuildToAgentStatuses orders the results by BuildToAgentStatuses terms. 
+func ByBuildToAgentStatuses(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToAgentStatusesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildToServerTasksCount orders the results by BuildToServerTasks count. +func ByBuildToServerTasksCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildToServerTasksStep(), opts...) + } +} + +// ByBuildToServerTasks orders the results by BuildToServerTasks terms. +func ByBuildToServerTasks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildToServerTasksStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newBuildToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, BuildToStatusTable, BuildToStatusColumn), + ) +} +func newBuildToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, BuildToEnvironmentTable, BuildToEnvironmentColumn), + ) +} +func newBuildToCompetitionStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToCompetitionInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, BuildToCompetitionTable, BuildToCompetitionColumn), + ) +} +func newBuildToLatestBuildCommitStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToLatestBuildCommitInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, BuildToLatestBuildCommitTable, BuildToLatestBuildCommitColumn), + ) +} +func newBuildToRepoCommitStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToRepoCommitInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, BuildToRepoCommitTable, BuildToRepoCommitColumn), + ) +} +func newBuildToProvisionedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToProvisionedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToProvisionedNetworkTable, BuildToProvisionedNetworkColumn), + ) +} +func newBuildToTeamStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToTeamInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToTeamTable, BuildToTeamColumn), + ) +} +func newBuildToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToPlanTable, BuildToPlanColumn), + ) +} +func newBuildToBuildCommitsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToBuildCommitsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToBuildCommitsTable, BuildToBuildCommitsColumn), + ) +} +func newBuildToAdhocPlansStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToAdhocPlansInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToAdhocPlansTable, BuildToAdhocPlansColumn), + ) +} +func newBuildToAgentStatusesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + 
sqlgraph.To(BuildToAgentStatusesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToAgentStatusesTable, BuildToAgentStatusesColumn), + ) +} +func newBuildToServerTasksStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildToServerTasksInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildToServerTasksTable, BuildToServerTasksColumn), + ) +} diff --git a/ent/build/where.go b/ent/build/where.go index 11546ba5..b8b01ba7 100755 --- a/ent/build/where.go +++ b/ent/build/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package build @@ -11,272 +11,152 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Build(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Build(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Build(sql.FieldLTE(FieldID, id)) } // Revision applies equality check predicate on the "revision" field. It's identical to RevisionEQ. 
func Revision(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldEQ(FieldRevision, v)) } // EnvironmentRevision applies equality check predicate on the "environment_revision" field. It's identical to EnvironmentRevisionEQ. func EnvironmentRevision(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldEQ(FieldEnvironmentRevision, v)) } // CompletedPlan applies equality check predicate on the "completed_plan" field. It's identical to CompletedPlanEQ. func CompletedPlan(v bool) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompletedPlan), v)) - }) + return predicate.Build(sql.FieldEQ(FieldCompletedPlan, v)) } // RevisionEQ applies the EQ predicate on the "revision" field. func RevisionEQ(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldEQ(FieldRevision, v)) } // RevisionNEQ applies the NEQ predicate on the "revision" field. func RevisionNEQ(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldNEQ(FieldRevision, v)) } // RevisionIn applies the In predicate on the "revision" field. func RevisionIn(vs ...int) predicate.Build { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Build(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRevision), v...)) - }) + return predicate.Build(sql.FieldIn(FieldRevision, vs...)) } // RevisionNotIn applies the NotIn predicate on the "revision" field. func RevisionNotIn(vs ...int) predicate.Build { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Build(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRevision), v...)) - }) + return predicate.Build(sql.FieldNotIn(FieldRevision, vs...)) } // RevisionGT applies the GT predicate on the "revision" field. func RevisionGT(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldGT(FieldRevision, v)) } // RevisionGTE applies the GTE predicate on the "revision" field. func RevisionGTE(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldGTE(FieldRevision, v)) } // RevisionLT applies the LT predicate on the "revision" field. func RevisionLT(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldLT(FieldRevision, v)) } // RevisionLTE applies the LTE predicate on the "revision" field. 
func RevisionLTE(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRevision), v)) - }) + return predicate.Build(sql.FieldLTE(FieldRevision, v)) } // EnvironmentRevisionEQ applies the EQ predicate on the "environment_revision" field. func EnvironmentRevisionEQ(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldEQ(FieldEnvironmentRevision, v)) } // EnvironmentRevisionNEQ applies the NEQ predicate on the "environment_revision" field. func EnvironmentRevisionNEQ(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldNEQ(FieldEnvironmentRevision, v)) } // EnvironmentRevisionIn applies the In predicate on the "environment_revision" field. func EnvironmentRevisionIn(vs ...int) predicate.Build { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Build(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEnvironmentRevision), v...)) - }) + return predicate.Build(sql.FieldIn(FieldEnvironmentRevision, vs...)) } // EnvironmentRevisionNotIn applies the NotIn predicate on the "environment_revision" field. func EnvironmentRevisionNotIn(vs ...int) predicate.Build { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Build(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEnvironmentRevision), v...)) - }) + return predicate.Build(sql.FieldNotIn(FieldEnvironmentRevision, vs...)) } // EnvironmentRevisionGT applies the GT predicate on the "environment_revision" field. func EnvironmentRevisionGT(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldGT(FieldEnvironmentRevision, v)) } // EnvironmentRevisionGTE applies the GTE predicate on the "environment_revision" field. func EnvironmentRevisionGTE(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldGTE(FieldEnvironmentRevision, v)) } // EnvironmentRevisionLT applies the LT predicate on the "environment_revision" field. func EnvironmentRevisionLT(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldLT(FieldEnvironmentRevision, v)) } // EnvironmentRevisionLTE applies the LTE predicate on the "environment_revision" field. func EnvironmentRevisionLTE(v int) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEnvironmentRevision), v)) - }) + return predicate.Build(sql.FieldLTE(FieldEnvironmentRevision, v)) } // CompletedPlanEQ applies the EQ predicate on the "completed_plan" field. 
func CompletedPlanEQ(v bool) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompletedPlan), v)) - }) + return predicate.Build(sql.FieldEQ(FieldCompletedPlan, v)) } // CompletedPlanNEQ applies the NEQ predicate on the "completed_plan" field. func CompletedPlanNEQ(v bool) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCompletedPlan), v)) - }) + return predicate.Build(sql.FieldNEQ(FieldCompletedPlan, v)) } // HasBuildToStatus applies the HasEdge predicate on the "BuildToStatus" edge. @@ -284,7 +164,6 @@ func HasBuildToStatus() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, BuildToStatusTable, BuildToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -294,11 +173,7 @@ func HasBuildToStatus() predicate.Build { // HasBuildToStatusWith applies the HasEdge predicate on the "BuildToStatus" edge with a given conditions (other predicates). func HasBuildToStatusWith(preds ...predicate.Status) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, BuildToStatusTable, BuildToStatusColumn), - ) + step := newBuildToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -312,7 +187,6 @@ func HasBuildToEnvironment() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, BuildToEnvironmentTable, BuildToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -322,11 +196,7 @@ func HasBuildToEnvironment() predicate.Build { // HasBuildToEnvironmentWith applies the HasEdge predicate on the "BuildToEnvironment" edge with a given conditions (other predicates). func HasBuildToEnvironmentWith(preds ...predicate.Environment) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, BuildToEnvironmentTable, BuildToEnvironmentColumn), - ) + step := newBuildToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -340,7 +210,6 @@ func HasBuildToCompetition() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToCompetitionTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, BuildToCompetitionTable, BuildToCompetitionColumn), ) sqlgraph.HasNeighbors(s, step) @@ -350,11 +219,7 @@ func HasBuildToCompetition() predicate.Build { // HasBuildToCompetitionWith applies the HasEdge predicate on the "BuildToCompetition" edge with a given conditions (other predicates). 
func HasBuildToCompetitionWith(preds ...predicate.Competition) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToCompetitionInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, BuildToCompetitionTable, BuildToCompetitionColumn), - ) + step := newBuildToCompetitionStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -368,7 +233,6 @@ func HasBuildToLatestBuildCommit() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToLatestBuildCommitTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, BuildToLatestBuildCommitTable, BuildToLatestBuildCommitColumn), ) sqlgraph.HasNeighbors(s, step) @@ -378,11 +242,7 @@ func HasBuildToLatestBuildCommit() predicate.Build { // HasBuildToLatestBuildCommitWith applies the HasEdge predicate on the "BuildToLatestBuildCommit" edge with a given conditions (other predicates). func HasBuildToLatestBuildCommitWith(preds ...predicate.BuildCommit) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToLatestBuildCommitInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, BuildToLatestBuildCommitTable, BuildToLatestBuildCommitColumn), - ) + step := newBuildToLatestBuildCommitStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -396,7 +256,6 @@ func HasBuildToRepoCommit() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToRepoCommitTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, BuildToRepoCommitTable, BuildToRepoCommitColumn), ) sqlgraph.HasNeighbors(s, step) @@ -406,11 +265,7 @@ func HasBuildToRepoCommit() predicate.Build { // HasBuildToRepoCommitWith applies the HasEdge predicate on the "BuildToRepoCommit" edge with a given conditions (other predicates). func HasBuildToRepoCommitWith(preds ...predicate.RepoCommit) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToRepoCommitInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, BuildToRepoCommitTable, BuildToRepoCommitColumn), - ) + step := newBuildToRepoCommitStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -424,7 +279,6 @@ func HasBuildToProvisionedNetwork() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToProvisionedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToProvisionedNetworkTable, BuildToProvisionedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -434,11 +288,7 @@ func HasBuildToProvisionedNetwork() predicate.Build { // HasBuildToProvisionedNetworkWith applies the HasEdge predicate on the "BuildToProvisionedNetwork" edge with a given conditions (other predicates). 
func HasBuildToProvisionedNetworkWith(preds ...predicate.ProvisionedNetwork) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToProvisionedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToProvisionedNetworkTable, BuildToProvisionedNetworkColumn), - ) + step := newBuildToProvisionedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -452,7 +302,6 @@ func HasBuildToTeam() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToTeamTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToTeamTable, BuildToTeamColumn), ) sqlgraph.HasNeighbors(s, step) @@ -462,11 +311,7 @@ func HasBuildToTeam() predicate.Build { // HasBuildToTeamWith applies the HasEdge predicate on the "BuildToTeam" edge with a given conditions (other predicates). func HasBuildToTeamWith(preds ...predicate.Team) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToTeamInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToTeamTable, BuildToTeamColumn), - ) + step := newBuildToTeamStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -480,7 +325,6 @@ func HasBuildToPlan() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToPlanTable, BuildToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -490,11 +334,7 @@ func HasBuildToPlan() predicate.Build { // HasBuildToPlanWith applies the HasEdge predicate on the "BuildToPlan" edge with a given conditions (other predicates). func HasBuildToPlanWith(preds ...predicate.Plan) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToPlanTable, BuildToPlanColumn), - ) + step := newBuildToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -508,7 +348,6 @@ func HasBuildToBuildCommits() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToBuildCommitsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToBuildCommitsTable, BuildToBuildCommitsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -518,11 +357,7 @@ func HasBuildToBuildCommits() predicate.Build { // HasBuildToBuildCommitsWith applies the HasEdge predicate on the "BuildToBuildCommits" edge with a given conditions (other predicates). 
func HasBuildToBuildCommitsWith(preds ...predicate.BuildCommit) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToBuildCommitsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToBuildCommitsTable, BuildToBuildCommitsColumn), - ) + step := newBuildToBuildCommitsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -536,7 +371,6 @@ func HasBuildToAdhocPlans() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToAdhocPlansTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToAdhocPlansTable, BuildToAdhocPlansColumn), ) sqlgraph.HasNeighbors(s, step) @@ -546,11 +380,7 @@ func HasBuildToAdhocPlans() predicate.Build { // HasBuildToAdhocPlansWith applies the HasEdge predicate on the "BuildToAdhocPlans" edge with a given conditions (other predicates). func HasBuildToAdhocPlansWith(preds ...predicate.AdhocPlan) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToAdhocPlansInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToAdhocPlansTable, BuildToAdhocPlansColumn), - ) + step := newBuildToAdhocPlansStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -564,7 +394,6 @@ func HasBuildToAgentStatuses() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToAgentStatusesTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToAgentStatusesTable, BuildToAgentStatusesColumn), ) sqlgraph.HasNeighbors(s, step) @@ -574,11 +403,7 @@ func HasBuildToAgentStatuses() predicate.Build { // HasBuildToAgentStatusesWith applies the HasEdge predicate on the "BuildToAgentStatuses" edge with a given conditions (other predicates). func HasBuildToAgentStatusesWith(preds ...predicate.AgentStatus) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToAgentStatusesInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToAgentStatusesTable, BuildToAgentStatusesColumn), - ) + step := newBuildToAgentStatusesStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -592,7 +417,6 @@ func HasBuildToServerTasks() predicate.Build { return predicate.Build(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToServerTasksTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildToServerTasksTable, BuildToServerTasksColumn), ) sqlgraph.HasNeighbors(s, step) @@ -602,11 +426,7 @@ func HasBuildToServerTasks() predicate.Build { // HasBuildToServerTasksWith applies the HasEdge predicate on the "BuildToServerTasks" edge with a given conditions (other predicates). 
func HasBuildToServerTasksWith(preds ...predicate.ServerTask) predicate.Build { return predicate.Build(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildToServerTasksInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildToServerTasksTable, BuildToServerTasksColumn), - ) + step := newBuildToServerTasksStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -617,32 +437,15 @@ func HasBuildToServerTasksWith(preds ...predicate.ServerTask) predicate.Build { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Build) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Build(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Build) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Build(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Build) predicate.Build { - return predicate.Build(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Build(sql.NotPredicates(p)) } diff --git a/ent/build_create.go b/ent/build_create.go index af4784f7..04baaa30 100755 --- a/ent/build_create.go +++ b/ent/build_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -268,44 +268,8 @@ func (bc *BuildCreate) Mutation() *BuildMutation { // Save creates the Build in the database. func (bc *BuildCreate) Save(ctx context.Context) (*Build, error) { - var ( - err error - node *Build - ) bc.defaults() - if len(bc.hooks) == 0 { - if err = bc.check(); err != nil { - return nil, err - } - node, err = bc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bc.check(); err != nil { - return nil, err - } - bc.mutation = mutation - if node, err = bc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(bc.hooks) - 1; i >= 0; i-- { - if bc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, bc.sqlSave, bc.mutation, bc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -366,10 +330,13 @@ func (bc *BuildCreate) check() error { } func (bc *BuildCreate) sqlSave(ctx context.Context) (*Build, error) { + if err := bc.check(); err != nil { + return nil, err + } _node, _spec := bc.createSpec() if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -380,54 +347,34 @@ func (bc *BuildCreate) sqlSave(ctx context.Context) (*Build, error) { return nil, err } } + bc.mutation.id = &_node.ID + bc.mutation.done = true return _node, nil } func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { var ( _node = &Build{config: bc.config} - _spec = &sqlgraph.CreateSpec{ - Table: build.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(build.Table, sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID)) ) if id, ok := bc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := bc.mutation.Revision(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldRevision, - }) + _spec.SetField(build.FieldRevision, field.TypeInt, value) _node.Revision = value } if value, ok := bc.mutation.EnvironmentRevision(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldEnvironmentRevision, - }) + _spec.SetField(build.FieldEnvironmentRevision, field.TypeInt, value) _node.EnvironmentRevision = value } if value, ok := bc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: build.FieldVars, - }) + _spec.SetField(build.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := bc.mutation.CompletedPlan(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: build.FieldCompletedPlan, - }) + _spec.SetField(build.FieldCompletedPlan, field.TypeBool, value) _node.CompletedPlan = value } if nodes := bc.mutation.BuildToStatusIDs(); len(nodes) > 0 { @@ -438,10 +385,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -457,10 +401,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -477,10 +418,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -497,10 +435,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToLatestBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -517,10 +452,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -537,10 +469,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -556,10 +485,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -575,10 +501,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -594,10 +517,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -613,10 +533,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -632,10 +549,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -651,10 +565,7 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -668,11 +579,15 @@ func (bc *BuildCreate) createSpec() (*Build, *sqlgraph.CreateSpec) { // BuildCreateBulk is the builder for creating many Build entities in bulk. type BuildCreateBulk struct { config + err error builders []*BuildCreate } // Save creates the Build entities in the database. 
func (bcb *BuildCreateBulk) Save(ctx context.Context) ([]*Build, error) { + if bcb.err != nil { + return nil, bcb.err + } specs := make([]*sqlgraph.CreateSpec, len(bcb.builders)) nodes := make([]*Build, len(bcb.builders)) mutators := make([]Mutator, len(bcb.builders)) @@ -689,8 +604,8 @@ func (bcb *BuildCreateBulk) Save(ctx context.Context) ([]*Build, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation) } else { @@ -698,7 +613,7 @@ func (bcb *BuildCreateBulk) Save(ctx context.Context) ([]*Build, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, bcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/build_delete.go b/ent/build_delete.go index f8658ab3..40674ac4 100755 --- a/ent/build_delete.go +++ b/ent/build_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (bd *BuildDelete) Where(ps ...predicate.Build) *BuildDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (bd *BuildDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(bd.hooks) == 0 { - affected, err = bd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - bd.mutation = mutation - affected, err = bd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(bd.hooks) - 1; i >= 0; i-- { - if bd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bd.sqlExec, bd.mutation, bd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (bd *BuildDelete) ExecX(ctx context.Context) int { } func (bd *BuildDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: build.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(build.Table, sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID)) if ps := bd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (bd *BuildDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, bd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, bd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + bd.mutation.done = true + return affected, err } // BuildDeleteOne is the builder for deleting a single Build entity. @@ -92,6 +61,12 @@ type BuildDeleteOne struct { bd *BuildDelete } +// Where appends a list predicates to the BuildDelete builder. 
+func (bdo *BuildDeleteOne) Where(ps ...predicate.Build) *BuildDeleteOne { + bdo.bd.mutation.Where(ps...) + return bdo +} + // Exec executes the deletion query. func (bdo *BuildDeleteOne) Exec(ctx context.Context) error { n, err := bdo.bd.Exec(ctx) @@ -107,5 +82,7 @@ func (bdo *BuildDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (bdo *BuildDeleteOne) ExecX(ctx context.Context) { - bdo.bd.ExecX(ctx) + if err := bdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/build_query.go b/ent/build_query.go index 0da969fb..4b9790f0 100755 --- a/ent/build_query.go +++ b/ent/build_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -31,26 +30,32 @@ import ( // BuildQuery is the builder for querying Build entities. type BuildQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Build - // eager-loading edges. - withBuildToStatus *StatusQuery - withBuildToEnvironment *EnvironmentQuery - withBuildToCompetition *CompetitionQuery - withBuildToLatestBuildCommit *BuildCommitQuery - withBuildToRepoCommit *RepoCommitQuery - withBuildToProvisionedNetwork *ProvisionedNetworkQuery - withBuildToTeam *TeamQuery - withBuildToPlan *PlanQuery - withBuildToBuildCommits *BuildCommitQuery - withBuildToAdhocPlans *AdhocPlanQuery - withBuildToAgentStatuses *AgentStatusQuery - withBuildToServerTasks *ServerTaskQuery - withFKs bool + ctx *QueryContext + order []build.OrderOption + inters []Interceptor + predicates []predicate.Build + withBuildToStatus *StatusQuery + withBuildToEnvironment *EnvironmentQuery + withBuildToCompetition *CompetitionQuery + withBuildToLatestBuildCommit *BuildCommitQuery + withBuildToRepoCommit *RepoCommitQuery + withBuildToProvisionedNetwork *ProvisionedNetworkQuery + withBuildToTeam *TeamQuery + withBuildToPlan *PlanQuery + withBuildToBuildCommits *BuildCommitQuery + withBuildToAdhocPlans *AdhocPlanQuery + withBuildToAgentStatuses *AgentStatusQuery + withBuildToServerTasks *ServerTaskQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Build) error + withNamedBuildToProvisionedNetwork map[string]*ProvisionedNetworkQuery + withNamedBuildToTeam map[string]*TeamQuery + withNamedBuildToPlan map[string]*PlanQuery + withNamedBuildToBuildCommits map[string]*BuildCommitQuery + withNamedBuildToAdhocPlans map[string]*AdhocPlanQuery + withNamedBuildToAgentStatuses map[string]*AgentStatusQuery + withNamedBuildToServerTasks map[string]*ServerTaskQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -62,34 +67,34 @@ func (bq *BuildQuery) Where(ps ...predicate.Build) *BuildQuery { return bq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (bq *BuildQuery) Limit(limit int) *BuildQuery { - bq.limit = &limit + bq.ctx.Limit = &limit return bq } -// Offset adds an offset step to the query. +// Offset to start from. func (bq *BuildQuery) Offset(offset int) *BuildQuery { - bq.offset = &offset + bq.ctx.Offset = &offset return bq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
func (bq *BuildQuery) Unique(unique bool) *BuildQuery { - bq.unique = &unique + bq.ctx.Unique = &unique return bq } -// Order adds an order step to the query. -func (bq *BuildQuery) Order(o ...OrderFunc) *BuildQuery { +// Order specifies how the records should be ordered. +func (bq *BuildQuery) Order(o ...build.OrderOption) *BuildQuery { bq.order = append(bq.order, o...) return bq } // QueryBuildToStatus chains the current query on the "BuildToStatus" edge. func (bq *BuildQuery) QueryBuildToStatus() *StatusQuery { - query := &StatusQuery{config: bq.config} + query := (&StatusClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -111,7 +116,7 @@ func (bq *BuildQuery) QueryBuildToStatus() *StatusQuery { // QueryBuildToEnvironment chains the current query on the "BuildToEnvironment" edge. func (bq *BuildQuery) QueryBuildToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: bq.config} + query := (&EnvironmentClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -133,7 +138,7 @@ func (bq *BuildQuery) QueryBuildToEnvironment() *EnvironmentQuery { // QueryBuildToCompetition chains the current query on the "BuildToCompetition" edge. func (bq *BuildQuery) QueryBuildToCompetition() *CompetitionQuery { - query := &CompetitionQuery{config: bq.config} + query := (&CompetitionClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -155,7 +160,7 @@ func (bq *BuildQuery) QueryBuildToCompetition() *CompetitionQuery { // QueryBuildToLatestBuildCommit chains the current query on the "BuildToLatestBuildCommit" edge. func (bq *BuildQuery) QueryBuildToLatestBuildCommit() *BuildCommitQuery { - query := &BuildCommitQuery{config: bq.config} + query := (&BuildCommitClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -177,7 +182,7 @@ func (bq *BuildQuery) QueryBuildToLatestBuildCommit() *BuildCommitQuery { // QueryBuildToRepoCommit chains the current query on the "BuildToRepoCommit" edge. func (bq *BuildQuery) QueryBuildToRepoCommit() *RepoCommitQuery { - query := &RepoCommitQuery{config: bq.config} + query := (&RepoCommitClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -199,7 +204,7 @@ func (bq *BuildQuery) QueryBuildToRepoCommit() *RepoCommitQuery { // QueryBuildToProvisionedNetwork chains the current query on the "BuildToProvisionedNetwork" edge. func (bq *BuildQuery) QueryBuildToProvisionedNetwork() *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: bq.config} + query := (&ProvisionedNetworkClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -221,7 +226,7 @@ func (bq *BuildQuery) QueryBuildToProvisionedNetwork() *ProvisionedNetworkQuery // QueryBuildToTeam chains the current query on the "BuildToTeam" edge. 
func (bq *BuildQuery) QueryBuildToTeam() *TeamQuery { - query := &TeamQuery{config: bq.config} + query := (&TeamClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -243,7 +248,7 @@ func (bq *BuildQuery) QueryBuildToTeam() *TeamQuery { // QueryBuildToPlan chains the current query on the "BuildToPlan" edge. func (bq *BuildQuery) QueryBuildToPlan() *PlanQuery { - query := &PlanQuery{config: bq.config} + query := (&PlanClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -265,7 +270,7 @@ func (bq *BuildQuery) QueryBuildToPlan() *PlanQuery { // QueryBuildToBuildCommits chains the current query on the "BuildToBuildCommits" edge. func (bq *BuildQuery) QueryBuildToBuildCommits() *BuildCommitQuery { - query := &BuildCommitQuery{config: bq.config} + query := (&BuildCommitClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -287,7 +292,7 @@ func (bq *BuildQuery) QueryBuildToBuildCommits() *BuildCommitQuery { // QueryBuildToAdhocPlans chains the current query on the "BuildToAdhocPlans" edge. func (bq *BuildQuery) QueryBuildToAdhocPlans() *AdhocPlanQuery { - query := &AdhocPlanQuery{config: bq.config} + query := (&AdhocPlanClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -309,7 +314,7 @@ func (bq *BuildQuery) QueryBuildToAdhocPlans() *AdhocPlanQuery { // QueryBuildToAgentStatuses chains the current query on the "BuildToAgentStatuses" edge. func (bq *BuildQuery) QueryBuildToAgentStatuses() *AgentStatusQuery { - query := &AgentStatusQuery{config: bq.config} + query := (&AgentStatusClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -331,7 +336,7 @@ func (bq *BuildQuery) QueryBuildToAgentStatuses() *AgentStatusQuery { // QueryBuildToServerTasks chains the current query on the "BuildToServerTasks" edge. func (bq *BuildQuery) QueryBuildToServerTasks() *ServerTaskQuery { - query := &ServerTaskQuery{config: bq.config} + query := (&ServerTaskClient{config: bq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bq.prepareQuery(ctx); err != nil { return nil, err @@ -354,7 +359,7 @@ func (bq *BuildQuery) QueryBuildToServerTasks() *ServerTaskQuery { // First returns the first Build entity from the query. // Returns a *NotFoundError when no Build was found. func (bq *BuildQuery) First(ctx context.Context) (*Build, error) { - nodes, err := bq.Limit(1).All(ctx) + nodes, err := bq.Limit(1).All(setContextOp(ctx, bq.ctx, "First")) if err != nil { return nil, err } @@ -377,7 +382,7 @@ func (bq *BuildQuery) FirstX(ctx context.Context) *Build { // Returns a *NotFoundError when no Build ID was found. 
func (bq *BuildQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = bq.Limit(1).IDs(ctx); err != nil { + if ids, err = bq.Limit(1).IDs(setContextOp(ctx, bq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -400,7 +405,7 @@ func (bq *BuildQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Build entity is found. // Returns a *NotFoundError when no Build entities are found. func (bq *BuildQuery) Only(ctx context.Context) (*Build, error) { - nodes, err := bq.Limit(2).All(ctx) + nodes, err := bq.Limit(2).All(setContextOp(ctx, bq.ctx, "Only")) if err != nil { return nil, err } @@ -428,7 +433,7 @@ func (bq *BuildQuery) OnlyX(ctx context.Context) *Build { // Returns a *NotFoundError when no entities are found. func (bq *BuildQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = bq.Limit(2).IDs(ctx); err != nil { + if ids, err = bq.Limit(2).IDs(setContextOp(ctx, bq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -453,10 +458,12 @@ func (bq *BuildQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Builds. func (bq *BuildQuery) All(ctx context.Context) ([]*Build, error) { + ctx = setContextOp(ctx, bq.ctx, "All") if err := bq.prepareQuery(ctx); err != nil { return nil, err } - return bq.sqlAll(ctx) + qr := querierAll[[]*Build, *BuildQuery]() + return withInterceptors[[]*Build](ctx, bq, qr, bq.inters) } // AllX is like All, but panics if an error occurs. @@ -469,9 +476,12 @@ func (bq *BuildQuery) AllX(ctx context.Context) []*Build { } // IDs executes the query and returns a list of Build IDs. -func (bq *BuildQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := bq.Select(build.FieldID).Scan(ctx, &ids); err != nil { +func (bq *BuildQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if bq.ctx.Unique == nil && bq.path != nil { + bq.Unique(true) + } + ctx = setContextOp(ctx, bq.ctx, "IDs") + if err = bq.Select(build.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -488,10 +498,11 @@ func (bq *BuildQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (bq *BuildQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, bq.ctx, "Count") if err := bq.prepareQuery(ctx); err != nil { return 0, err } - return bq.sqlCount(ctx) + return withInterceptors[int](ctx, bq, querierCount[*BuildQuery](), bq.inters) } // CountX is like Count, but panics if an error occurs. @@ -505,10 +516,15 @@ func (bq *BuildQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (bq *BuildQuery) Exist(ctx context.Context) (bool, error) { - if err := bq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, bq.ctx, "Exist") + switch _, err := bq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return bq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -528,9 +544,9 @@ func (bq *BuildQuery) Clone() *BuildQuery { } return &BuildQuery{ config: bq.config, - limit: bq.limit, - offset: bq.offset, - order: append([]OrderFunc{}, bq.order...), + ctx: bq.ctx.Clone(), + order: append([]build.OrderOption{}, bq.order...), + inters: append([]Interceptor{}, bq.inters...), predicates: append([]predicate.Build{}, bq.predicates...), withBuildToStatus: bq.withBuildToStatus.Clone(), withBuildToEnvironment: bq.withBuildToEnvironment.Clone(), @@ -545,16 +561,15 @@ func (bq *BuildQuery) Clone() *BuildQuery { withBuildToAgentStatuses: bq.withBuildToAgentStatuses.Clone(), withBuildToServerTasks: bq.withBuildToServerTasks.Clone(), // clone intermediate query. - sql: bq.sql.Clone(), - path: bq.path, - unique: bq.unique, + sql: bq.sql.Clone(), + path: bq.path, } } // WithBuildToStatus tells the query-builder to eager-load the nodes that are connected to // the "BuildToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToStatus(opts ...func(*StatusQuery)) *BuildQuery { - query := &StatusQuery{config: bq.config} + query := (&StatusClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -565,7 +580,7 @@ func (bq *BuildQuery) WithBuildToStatus(opts ...func(*StatusQuery)) *BuildQuery // WithBuildToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "BuildToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToEnvironment(opts ...func(*EnvironmentQuery)) *BuildQuery { - query := &EnvironmentQuery{config: bq.config} + query := (&EnvironmentClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -576,7 +591,7 @@ func (bq *BuildQuery) WithBuildToEnvironment(opts ...func(*EnvironmentQuery)) *B // WithBuildToCompetition tells the query-builder to eager-load the nodes that are connected to // the "BuildToCompetition" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToCompetition(opts ...func(*CompetitionQuery)) *BuildQuery { - query := &CompetitionQuery{config: bq.config} + query := (&CompetitionClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -587,7 +602,7 @@ func (bq *BuildQuery) WithBuildToCompetition(opts ...func(*CompetitionQuery)) *B // WithBuildToLatestBuildCommit tells the query-builder to eager-load the nodes that are connected to // the "BuildToLatestBuildCommit" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToLatestBuildCommit(opts ...func(*BuildCommitQuery)) *BuildQuery { - query := &BuildCommitQuery{config: bq.config} + query := (&BuildCommitClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -598,7 +613,7 @@ func (bq *BuildQuery) WithBuildToLatestBuildCommit(opts ...func(*BuildCommitQuer // WithBuildToRepoCommit tells the query-builder to eager-load the nodes that are connected to // the "BuildToRepoCommit" edge. The optional arguments are used to configure the query builder of the edge. 
func (bq *BuildQuery) WithBuildToRepoCommit(opts ...func(*RepoCommitQuery)) *BuildQuery { - query := &RepoCommitQuery{config: bq.config} + query := (&RepoCommitClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -609,7 +624,7 @@ func (bq *BuildQuery) WithBuildToRepoCommit(opts ...func(*RepoCommitQuery)) *Bui // WithBuildToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to // the "BuildToProvisionedNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToProvisionedNetwork(opts ...func(*ProvisionedNetworkQuery)) *BuildQuery { - query := &ProvisionedNetworkQuery{config: bq.config} + query := (&ProvisionedNetworkClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -620,7 +635,7 @@ func (bq *BuildQuery) WithBuildToProvisionedNetwork(opts ...func(*ProvisionedNet // WithBuildToTeam tells the query-builder to eager-load the nodes that are connected to // the "BuildToTeam" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToTeam(opts ...func(*TeamQuery)) *BuildQuery { - query := &TeamQuery{config: bq.config} + query := (&TeamClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -631,7 +646,7 @@ func (bq *BuildQuery) WithBuildToTeam(opts ...func(*TeamQuery)) *BuildQuery { // WithBuildToPlan tells the query-builder to eager-load the nodes that are connected to // the "BuildToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToPlan(opts ...func(*PlanQuery)) *BuildQuery { - query := &PlanQuery{config: bq.config} + query := (&PlanClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -642,7 +657,7 @@ func (bq *BuildQuery) WithBuildToPlan(opts ...func(*PlanQuery)) *BuildQuery { // WithBuildToBuildCommits tells the query-builder to eager-load the nodes that are connected to // the "BuildToBuildCommits" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToBuildCommits(opts ...func(*BuildCommitQuery)) *BuildQuery { - query := &BuildCommitQuery{config: bq.config} + query := (&BuildCommitClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -653,7 +668,7 @@ func (bq *BuildQuery) WithBuildToBuildCommits(opts ...func(*BuildCommitQuery)) * // WithBuildToAdhocPlans tells the query-builder to eager-load the nodes that are connected to // the "BuildToAdhocPlans" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToAdhocPlans(opts ...func(*AdhocPlanQuery)) *BuildQuery { - query := &AdhocPlanQuery{config: bq.config} + query := (&AdhocPlanClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -664,7 +679,7 @@ func (bq *BuildQuery) WithBuildToAdhocPlans(opts ...func(*AdhocPlanQuery)) *Buil // WithBuildToAgentStatuses tells the query-builder to eager-load the nodes that are connected to // the "BuildToAgentStatuses" edge. The optional arguments are used to configure the query builder of the edge. 
func (bq *BuildQuery) WithBuildToAgentStatuses(opts ...func(*AgentStatusQuery)) *BuildQuery { - query := &AgentStatusQuery{config: bq.config} + query := (&AgentStatusClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -675,7 +690,7 @@ func (bq *BuildQuery) WithBuildToAgentStatuses(opts ...func(*AgentStatusQuery)) // WithBuildToServerTasks tells the query-builder to eager-load the nodes that are connected to // the "BuildToServerTasks" edge. The optional arguments are used to configure the query builder of the edge. func (bq *BuildQuery) WithBuildToServerTasks(opts ...func(*ServerTaskQuery)) *BuildQuery { - query := &ServerTaskQuery{config: bq.config} + query := (&ServerTaskClient{config: bq.config}).Query() for _, opt := range opts { opt(query) } @@ -697,17 +712,13 @@ func (bq *BuildQuery) WithBuildToServerTasks(opts ...func(*ServerTaskQuery)) *Bu // GroupBy(build.FieldRevision). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (bq *BuildQuery) GroupBy(field string, fields ...string) *BuildGroupBy { - group := &BuildGroupBy{config: bq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := bq.prepareQuery(ctx); err != nil { - return nil, err - } - return bq.sqlQuery(ctx), nil - } - return group + bq.ctx.Fields = append([]string{field}, fields...) + grbuild := &BuildGroupBy{build: bq} + grbuild.flds = &bq.ctx.Fields + grbuild.label = build.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -722,14 +733,31 @@ func (bq *BuildQuery) GroupBy(field string, fields ...string) *BuildGroupBy { // client.Build.Query(). // Select(build.FieldRevision). // Scan(ctx, &v) -// func (bq *BuildQuery) Select(fields ...string) *BuildSelect { - bq.fields = append(bq.fields, fields...) - return &BuildSelect{BuildQuery: bq} + bq.ctx.Fields = append(bq.ctx.Fields, fields...) + sbuild := &BuildSelect{BuildQuery: bq} + sbuild.label = build.Label + sbuild.flds, sbuild.scan = &bq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BuildSelect configured with the given aggregations. +func (bq *BuildQuery) Aggregate(fns ...AggregateFunc) *BuildSelect { + return bq.Select().Aggregate(fns...) } func (bq *BuildQuery) prepareQuery(ctx context.Context) error { - for _, f := range bq.fields { + for _, inter := range bq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, bq); err != nil { + return err + } + } + } + for _, f := range bq.ctx.Fields { if !build.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -744,7 +772,7 @@ func (bq *BuildQuery) prepareQuery(ctx context.Context) error { return nil } -func (bq *BuildQuery) sqlAll(ctx context.Context) ([]*Build, error) { +func (bq *BuildQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Build, error) { var ( nodes = []*Build{} withFKs = bq.withFKs @@ -770,410 +798,560 @@ func (bq *BuildQuery) sqlAll(ctx context.Context) ([]*Build, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, build.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Build).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Build{config: bq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(bq.modifiers) > 0 { + _spec.Modifiers = bq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, bq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := bq.withBuildToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := bq.loadBuildToStatus(ctx, query, nodes, nil, + func(n *Build, e *Status) { n.Edges.BuildToStatus = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.build_build_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "build_build_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToStatus = n - } } - if query := bq.withBuildToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Build) - for i := range nodes { - if nodes[i].build_build_to_environment == nil { - continue - } - fk := *nodes[i].build_build_to_environment - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := bq.loadBuildToEnvironment(ctx, query, nodes, nil, + func(n *Build, e *Environment) { n.Edges.BuildToEnvironment = e }); err != nil { + return nil, err } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := bq.withBuildToCompetition; query != nil { + if err := bq.loadBuildToCompetition(ctx, query, nodes, nil, + func(n *Build, e *Competition) { n.Edges.BuildToCompetition = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_environment" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.BuildToEnvironment = n - } + } + if query := bq.withBuildToLatestBuildCommit; query != nil { + if err := bq.loadBuildToLatestBuildCommit(ctx, query, nodes, nil, + func(n *Build, e *BuildCommit) { n.Edges.BuildToLatestBuildCommit = e }); err != nil { + return nil, err } } - - if query := bq.withBuildToCompetition; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Build) - for i := range nodes { - if nodes[i].build_build_to_competition == nil { - continue - } - fk := *nodes[i].build_build_to_competition - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := 
bq.withBuildToRepoCommit; query != nil { + if err := bq.loadBuildToRepoCommit(ctx, query, nodes, nil, + func(n *Build, e *RepoCommit) { n.Edges.BuildToRepoCommit = e }); err != nil { + return nil, err } - query.Where(competition.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := bq.withBuildToProvisionedNetwork; query != nil { + if err := bq.loadBuildToProvisionedNetwork(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToProvisionedNetwork = []*ProvisionedNetwork{} }, + func(n *Build, e *ProvisionedNetwork) { + n.Edges.BuildToProvisionedNetwork = append(n.Edges.BuildToProvisionedNetwork, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_competition" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.BuildToCompetition = n - } + } + if query := bq.withBuildToTeam; query != nil { + if err := bq.loadBuildToTeam(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToTeam = []*Team{} }, + func(n *Build, e *Team) { n.Edges.BuildToTeam = append(n.Edges.BuildToTeam, e) }); err != nil { + return nil, err } } - - if query := bq.withBuildToLatestBuildCommit; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Build) - for i := range nodes { - if nodes[i].build_build_to_latest_build_commit == nil { - continue - } - fk := *nodes[i].build_build_to_latest_build_commit - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := bq.withBuildToPlan; query != nil { + if err := bq.loadBuildToPlan(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToPlan = []*Plan{} }, + func(n *Build, e *Plan) { n.Edges.BuildToPlan = append(n.Edges.BuildToPlan, e) }); err != nil { + return nil, err } - query.Where(buildcommit.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := bq.withBuildToBuildCommits; query != nil { + if err := bq.loadBuildToBuildCommits(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToBuildCommits = []*BuildCommit{} }, + func(n *Build, e *BuildCommit) { n.Edges.BuildToBuildCommits = append(n.Edges.BuildToBuildCommits, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_latest_build_commit" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.BuildToLatestBuildCommit = n - } + } + if query := bq.withBuildToAdhocPlans; query != nil { + if err := bq.loadBuildToAdhocPlans(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToAdhocPlans = []*AdhocPlan{} }, + func(n *Build, e *AdhocPlan) { n.Edges.BuildToAdhocPlans = append(n.Edges.BuildToAdhocPlans, e) }); err != nil { + return nil, err } } - - if query := bq.withBuildToRepoCommit; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Build) - for i := range nodes { - if nodes[i].build_build_to_repo_commit == nil { - continue - } - fk := *nodes[i].build_build_to_repo_commit - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := bq.withBuildToAgentStatuses; query != nil { + if err := bq.loadBuildToAgentStatuses(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToAgentStatuses = []*AgentStatus{} }, + func(n *Build, e *AgentStatus) { n.Edges.BuildToAgentStatuses = append(n.Edges.BuildToAgentStatuses, e) }); err != nil { 
+ return nil, err } - query.Where(repocommit.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := bq.withBuildToServerTasks; query != nil { + if err := bq.loadBuildToServerTasks(ctx, query, nodes, + func(n *Build) { n.Edges.BuildToServerTasks = []*ServerTask{} }, + func(n *Build, e *ServerTask) { n.Edges.BuildToServerTasks = append(n.Edges.BuildToServerTasks, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_repo_commit" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.BuildToRepoCommit = n - } + } + for name, query := range bq.withNamedBuildToProvisionedNetwork { + if err := bq.loadBuildToProvisionedNetwork(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToProvisionedNetwork(name) }, + func(n *Build, e *ProvisionedNetwork) { n.appendNamedBuildToProvisionedNetwork(name, e) }); err != nil { + return nil, err } } - - if query := bq.withBuildToProvisionedNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToProvisionedNetwork = []*ProvisionedNetwork{} - } - query.withFKs = true - query.Where(predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToProvisionedNetworkColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + for name, query := range bq.withNamedBuildToTeam { + if err := bq.loadBuildToTeam(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToTeam(name) }, + func(n *Build, e *Team) { n.appendNamedBuildToTeam(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.provisioned_network_provisioned_network_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioned_network_provisioned_network_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToProvisionedNetwork = append(node.Edges.BuildToProvisionedNetwork, n) + } + for name, query := range bq.withNamedBuildToPlan { + if err := bq.loadBuildToPlan(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToPlan(name) }, + func(n *Build, e *Plan) { n.appendNamedBuildToPlan(name, e) }); err != nil { + return nil, err } } - - if query := bq.withBuildToTeam; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToTeam = []*Team{} - } - query.withFKs = true - query.Where(predicate.Team(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToTeamColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + for name, query := range bq.withNamedBuildToBuildCommits { + if err := bq.loadBuildToBuildCommits(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToBuildCommits(name) }, + func(n *Build, e *BuildCommit) { n.appendNamedBuildToBuildCommits(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.team_team_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "team_team_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected 
foreign-key "team_team_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToTeam = append(node.Edges.BuildToTeam, n) + } + for name, query := range bq.withNamedBuildToAdhocPlans { + if err := bq.loadBuildToAdhocPlans(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToAdhocPlans(name) }, + func(n *Build, e *AdhocPlan) { n.appendNamedBuildToAdhocPlans(name, e) }); err != nil { + return nil, err } } - - if query := bq.withBuildToPlan; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToPlan = []*Plan{} - } - query.withFKs = true - query.Where(predicate.Plan(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToPlanColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + for name, query := range bq.withNamedBuildToAgentStatuses { + if err := bq.loadBuildToAgentStatuses(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToAgentStatuses(name) }, + func(n *Build, e *AgentStatus) { n.appendNamedBuildToAgentStatuses(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.plan_plan_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_plan_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToPlan = append(node.Edges.BuildToPlan, n) + } + for name, query := range bq.withNamedBuildToServerTasks { + if err := bq.loadBuildToServerTasks(ctx, query, nodes, + func(n *Build) { n.appendNamedBuildToServerTasks(name) }, + func(n *Build, e *ServerTask) { n.appendNamedBuildToServerTasks(name, e) }); err != nil { + return nil, err } } + for i := range bq.loadTotal { + if err := bq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := bq.withBuildToBuildCommits; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) +func (bq *BuildQuery) loadBuildToStatus(ctx context.Context, query *StatusQuery, nodes []*Build, init func(*Build), assign func(*Build, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.build_build_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "build_build_to_status" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "build_build_to_status" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Build, init func(*Build), assign func(*Build, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Build) + for i := range nodes { + if nodes[i].build_build_to_environment == nil { + continue + } + fk := *nodes[i].build_build_to_environment + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } 
+ if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "build_build_to_environment" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToBuildCommits = []*BuildCommit{} - } - query.withFKs = true - query.Where(predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToBuildCommitsColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.build_commit_build_commit_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "build_commit_build_commit_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_commit_build_commit_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToBuildCommits = append(node.Edges.BuildToBuildCommits, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToCompetition(ctx context.Context, query *CompetitionQuery, nodes []*Build, init func(*Build), assign func(*Build, *Competition)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Build) + for i := range nodes { + if nodes[i].build_build_to_competition == nil { + continue + } + fk := *nodes[i].build_build_to_competition + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := bq.withBuildToAdhocPlans; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) + if len(ids) == 0 { + return nil + } + query.Where(competition.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "build_build_to_competition" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToAdhocPlans = []*AdhocPlan{} - } - query.withFKs = true - query.Where(predicate.AdhocPlan(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToAdhocPlansColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.adhoc_plan_adhoc_plan_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "adhoc_plan_adhoc_plan_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToAdhocPlans = append(node.Edges.BuildToAdhocPlans, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToLatestBuildCommit(ctx context.Context, query *BuildCommitQuery, nodes []*Build, init func(*Build), assign func(*Build, *BuildCommit)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Build) + for i := range nodes { + if nodes[i].build_build_to_latest_build_commit == nil { + continue + } + fk := *nodes[i].build_build_to_latest_build_commit + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := bq.withBuildToAgentStatuses; query != nil { - fks 
:= make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) + if len(ids) == 0 { + return nil + } + query.Where(buildcommit.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "build_build_to_latest_build_commit" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToAgentStatuses = []*AgentStatus{} - } - query.withFKs = true - query.Where(predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToAgentStatusesColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.agent_status_agent_status_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "agent_status_agent_status_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToAgentStatuses = append(node.Edges.BuildToAgentStatuses, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToRepoCommit(ctx context.Context, query *RepoCommitQuery, nodes []*Build, init func(*Build), assign func(*Build, *RepoCommit)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Build) + for i := range nodes { + if nodes[i].build_build_to_repo_commit == nil { + continue } + fk := *nodes[i].build_build_to_repo_commit + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := bq.withBuildToServerTasks; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Build) + if len(ids) == 0 { + return nil + } + query.Where(repocommit.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "build_build_to_repo_commit" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildToServerTasks = []*ServerTask{} - } - query.withFKs = true - query.Where(predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.InValues(build.BuildToServerTasksColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.server_task_server_task_to_build - if fk == nil { - return nil, fmt.Errorf(`foreign-key "server_task_server_task_to_build" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_build" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildToServerTasks = append(node.Edges.BuildToServerTasks, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToProvisionedNetwork(ctx context.Context, query *ProvisionedNetworkQuery, nodes []*Build, init func(*Build), assign func(*Build, *ProvisionedNetwork)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.ProvisionedNetwork(func(s 
*sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToProvisionedNetworkColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.provisioned_network_provisioned_network_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "provisioned_network_provisioned_network_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioned_network_provisioned_network_to_build" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil +} +func (bq *BuildQuery) loadBuildToTeam(ctx context.Context, query *TeamQuery, nodes []*Build, init func(*Build), assign func(*Build, *Team)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Team(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToTeamColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.team_team_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "team_team_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "team_team_to_build" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToPlan(ctx context.Context, query *PlanQuery, nodes []*Build, init func(*Build), assign func(*Build, *Plan)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Plan(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToPlanColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_plan_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "plan_plan_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_plan_to_build" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToBuildCommits(ctx context.Context, query *BuildCommitQuery, nodes []*Build, init func(*Build), assign func(*Build, *BuildCommit)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.BuildCommit(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToBuildCommitsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.build_commit_build_commit_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "build_commit_build_commit_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "build_commit_build_commit_to_build" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (bq 
*BuildQuery) loadBuildToAdhocPlans(ctx context.Context, query *AdhocPlanQuery, nodes []*Build, init func(*Build), assign func(*Build, *AdhocPlan)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.AdhocPlan(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToAdhocPlansColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.adhoc_plan_adhoc_plan_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "adhoc_plan_adhoc_plan_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "adhoc_plan_adhoc_plan_to_build" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToAgentStatuses(ctx context.Context, query *AgentStatusQuery, nodes []*Build, init func(*Build), assign func(*Build, *AgentStatus)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.AgentStatus(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToAgentStatusesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.agent_status_agent_status_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "agent_status_agent_status_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "agent_status_agent_status_to_build" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (bq *BuildQuery) loadBuildToServerTasks(ctx context.Context, query *ServerTaskQuery, nodes []*Build, init func(*Build), assign func(*Build, *ServerTask)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Build) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.ServerTask(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(build.BuildToServerTasksColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.server_task_server_task_to_build + if fk == nil { + return fmt.Errorf(`foreign-key "server_task_server_task_to_build" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "server_task_server_task_to_build" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } func (bq *BuildQuery) sqlCount(ctx context.Context) (int, error) { _spec := bq.querySpec() - _spec.Node.Columns = bq.fields - if len(bq.fields) > 0 { - _spec.Unique = bq.unique != nil && *bq.unique + if len(bq.modifiers) > 0 { + _spec.Modifiers = bq.modifiers } - return sqlgraph.CountNodes(ctx, bq.driver, _spec) -} - -func (bq *BuildQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := bq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = 
bq.ctx.Fields + if len(bq.ctx.Fields) > 0 { + _spec.Unique = bq.ctx.Unique != nil && *bq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, bq.driver, _spec) } func (bq *BuildQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: build.Table, - Columns: build.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, - }, - From: bq.sql, - Unique: true, - } - if unique := bq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(build.Table, build.Columns, sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID)) + _spec.From = bq.sql + if unique := bq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if bq.path != nil { + _spec.Unique = true } - if fields := bq.fields; len(fields) > 0 { + if fields := bq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, build.FieldID) for i := range fields { @@ -1189,10 +1367,10 @@ func (bq *BuildQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := bq.limit; limit != nil { + if limit := bq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := bq.offset; offset != nil { + if offset := bq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := bq.order; len(ps) > 0 { @@ -1208,7 +1386,7 @@ func (bq *BuildQuery) querySpec() *sqlgraph.QuerySpec { func (bq *BuildQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(bq.driver.Dialect()) t1 := builder.Table(build.Table) - columns := bq.fields + columns := bq.ctx.Fields if len(columns) == 0 { columns = build.Columns } @@ -1217,7 +1395,7 @@ func (bq *BuildQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = bq.sql selector.Select(selector.Columns(columns...)...) } - if bq.unique != nil && *bq.unique { + if bq.ctx.Unique != nil && *bq.ctx.Unique { selector.Distinct() } for _, p := range bq.predicates { @@ -1226,498 +1404,198 @@ func (bq *BuildQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range bq.order { p(selector) } - if offset := bq.offset; offset != nil { + if offset := bq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := bq.limit; limit != nil { + if limit := bq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// BuildGroupBy is the group-by builder for Build entities. -type BuildGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (bgb *BuildGroupBy) Aggregate(fns ...AggregateFunc) *BuildGroupBy { - bgb.fns = append(bgb.fns, fns...) - return bgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (bgb *BuildGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := bgb.path(ctx) - if err != nil { - return err +// WithNamedBuildToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to the "BuildToProvisionedNetwork" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (bq *BuildQuery) WithNamedBuildToProvisionedNetwork(name string, opts ...func(*ProvisionedNetworkQuery)) *BuildQuery { + query := (&ProvisionedNetworkClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - bgb.sql = query - return bgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (bgb *BuildGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := bgb.Scan(ctx, v); err != nil { - panic(err) + if bq.withNamedBuildToProvisionedNetwork == nil { + bq.withNamedBuildToProvisionedNetwork = make(map[string]*ProvisionedNetworkQuery) } + bq.withNamedBuildToProvisionedNetwork[name] = query + return bq } -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(bgb.fields) > 1 { - return nil, errors.New("ent: BuildGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := bgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedBuildToTeam tells the query-builder to eager-load the nodes that are connected to the "BuildToTeam" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bq *BuildQuery) WithNamedBuildToTeam(name string, opts ...func(*TeamQuery)) *BuildQuery { + query := (&TeamClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (bgb *BuildGroupBy) StringsX(ctx context.Context) []string { - v, err := bgb.Strings(ctx) - if err != nil { - panic(err) + if bq.withNamedBuildToTeam == nil { + bq.withNamedBuildToTeam = make(map[string]*TeamQuery) } - return v + bq.withNamedBuildToTeam[name] = query + return bq } -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = bgb.Strings(ctx); err != nil { - return +// WithNamedBuildToPlan tells the query-builder to eager-load the nodes that are connected to the "BuildToPlan" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bq *BuildQuery) WithNamedBuildToPlan(name string, opts ...func(*PlanQuery)) *BuildQuery { + query := (&PlanClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildGroupBy.Strings returned %d results when one was expected", len(v)) + if bq.withNamedBuildToPlan == nil { + bq.withNamedBuildToPlan = make(map[string]*PlanQuery) } - return -} - -// StringX is like String, but panics if an error occurs. -func (bgb *BuildGroupBy) StringX(ctx context.Context) string { - v, err := bgb.String(ctx) - if err != nil { - panic(err) - } - return v + bq.withNamedBuildToPlan[name] = query + return bq } -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
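A hedged sketch of how the WithNamed* eager-loading helpers added above might be called: each alias keeps its own result set, which is how ent's GraphQL integration resolves aliased edge fields. The NamedBuildToTeam accessor on *ent.Build is assumed to be generated alongside these helpers, and the google/uuid import is assumed from the rest of the schema.

package queries

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/build"
	"github.com/google/uuid"
)

// teamsByAlias eager-loads the BuildToTeam edge under the alias "activeTeams"
// and reads the same alias back from the loaded Build node.
func teamsByAlias(ctx context.Context, client *ent.Client, id uuid.UUID) ([]*ent.Team, error) {
	b, err := client.Build.Query().
		Where(build.ID(id)).
		WithNamedBuildToTeam("activeTeams").
		Only(ctx)
	if err != nil {
		return nil, err
	}
	return b.NamedBuildToTeam("activeTeams")
}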
-func (bgb *BuildGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(bgb.fields) > 1 { - return nil, errors.New("ent: BuildGroupBy.Ints is not achievable when grouping more than 1 field") +// WithNamedBuildToBuildCommits tells the query-builder to eager-load the nodes that are connected to the "BuildToBuildCommits" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bq *BuildQuery) WithNamedBuildToBuildCommits(name string, opts ...func(*BuildCommitQuery)) *BuildQuery { + query := (&BuildCommitClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []int - if err := bgb.Scan(ctx, &v); err != nil { - return nil, err + if bq.withNamedBuildToBuildCommits == nil { + bq.withNamedBuildToBuildCommits = make(map[string]*BuildCommitQuery) } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (bgb *BuildGroupBy) IntsX(ctx context.Context) []int { - v, err := bgb.Ints(ctx) - if err != nil { - panic(err) - } - return v + bq.withNamedBuildToBuildCommits[name] = query + return bq } -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = bgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildGroupBy.Ints returned %d results when one was expected", len(v)) +// WithNamedBuildToAdhocPlans tells the query-builder to eager-load the nodes that are connected to the "BuildToAdhocPlans" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bq *BuildQuery) WithNamedBuildToAdhocPlans(name string, opts ...func(*AdhocPlanQuery)) *BuildQuery { + query := (&AdhocPlanClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// IntX is like Int, but panics if an error occurs. -func (bgb *BuildGroupBy) IntX(ctx context.Context) int { - v, err := bgb.Int(ctx) - if err != nil { - panic(err) + if bq.withNamedBuildToAdhocPlans == nil { + bq.withNamedBuildToAdhocPlans = make(map[string]*AdhocPlanQuery) } - return v + bq.withNamedBuildToAdhocPlans[name] = query + return bq } -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(bgb.fields) > 1 { - return nil, errors.New("ent: BuildGroupBy.Float64s is not achievable when grouping more than 1 field") +// WithNamedBuildToAgentStatuses tells the query-builder to eager-load the nodes that are connected to the "BuildToAgentStatuses" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bq *BuildQuery) WithNamedBuildToAgentStatuses(name string, opts ...func(*AgentStatusQuery)) *BuildQuery { + query := (&AgentStatusClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []float64 - if err := bgb.Scan(ctx, &v); err != nil { - return nil, err + if bq.withNamedBuildToAgentStatuses == nil { + bq.withNamedBuildToAgentStatuses = make(map[string]*AgentStatusQuery) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. 
-func (bgb *BuildGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := bgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v + bq.withNamedBuildToAgentStatuses[name] = query + return bq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = bgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedBuildToServerTasks tells the query-builder to eager-load the nodes that are connected to the "BuildToServerTasks" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bq *BuildQuery) WithNamedBuildToServerTasks(name string, opts ...func(*ServerTaskQuery)) *BuildQuery { + query := (&ServerTaskClient{config: bq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (bgb *BuildGroupBy) Float64X(ctx context.Context) float64 { - v, err := bgb.Float64(ctx) - if err != nil { - panic(err) + if bq.withNamedBuildToServerTasks == nil { + bq.withNamedBuildToServerTasks = make(map[string]*ServerTaskQuery) } - return v + bq.withNamedBuildToServerTasks[name] = query + return bq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(bgb.fields) > 1 { - return nil, errors.New("ent: BuildGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := bgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// BuildGroupBy is the group-by builder for Build entities. +type BuildGroupBy struct { + selector + build *BuildQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (bgb *BuildGroupBy) BoolsX(ctx context.Context) []bool { - v, err := bgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (bgb *BuildGroupBy) Aggregate(fns ...AggregateFunc) *BuildGroupBy { + bgb.fns = append(bgb.fns, fns...) + return bgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bgb *BuildGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = bgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (bgb *BuildGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, bgb.build.ctx, "GroupBy") + if err := bgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*BuildQuery, *BuildGroupBy](ctx, bgb.build, bgb, bgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. 
-func (bgb *BuildGroupBy) BoolX(ctx context.Context) bool { - v, err := bgb.Bool(ctx) - if err != nil { - panic(err) +func (bgb *BuildGroupBy) sqlScan(ctx context.Context, root *BuildQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(bgb.fns)) + for _, fn := range bgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (bgb *BuildGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range bgb.fields { - if !build.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*bgb.flds)+len(bgb.fns)) + for _, f := range *bgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := bgb.sqlQuery() + selector.GroupBy(selector.Columns(*bgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := bgb.driver.Query(ctx, query, args, rows); err != nil { + if err := bgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (bgb *BuildGroupBy) sqlQuery() *sql.Selector { - selector := bgb.sql.Select() - aggregation := make([]string, 0, len(bgb.fns)) - for _, fn := range bgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(bgb.fields)+len(bgb.fns)) - for _, f := range bgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(bgb.fields...)...) -} - // BuildSelect is the builder for selecting fields of Build entities. type BuildSelect struct { *BuildQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (bs *BuildSelect) Aggregate(fns ...AggregateFunc) *BuildSelect { + bs.fns = append(bs.fns, fns...) + return bs } // Scan applies the selector query and scans the result into the given value. -func (bs *BuildSelect) Scan(ctx context.Context, v interface{}) error { +func (bs *BuildSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, bs.ctx, "Select") if err := bs.prepareQuery(ctx); err != nil { return err } - bs.sql = bs.BuildQuery.sqlQuery(ctx) - return bs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (bs *BuildSelect) ScanX(ctx context.Context, v interface{}) { - if err := bs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) Strings(ctx context.Context) ([]string, error) { - if len(bs.fields) > 1 { - return nil, errors.New("ent: BuildSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := bs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (bs *BuildSelect) StringsX(ctx context.Context) []string { - v, err := bs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = bs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (bs *BuildSelect) StringX(ctx context.Context) string { - v, err := bs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) Ints(ctx context.Context) ([]int, error) { - if len(bs.fields) > 1 { - return nil, errors.New("ent: BuildSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := bs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (bs *BuildSelect) IntsX(ctx context.Context) []int { - v, err := bs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = bs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (bs *BuildSelect) IntX(ctx context.Context) int { - v, err := bs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(bs.fields) > 1 { - return nil, errors.New("ent: BuildSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := bs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (bs *BuildSelect) Float64sX(ctx context.Context) []float64 { - v, err := bs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = bs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (bs *BuildSelect) Float64X(ctx context.Context) float64 { - v, err := bs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. 
-func (bs *BuildSelect) Bools(ctx context.Context) ([]bool, error) { - if len(bs.fields) > 1 { - return nil, errors.New("ent: BuildSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := bs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (bs *BuildSelect) BoolsX(ctx context.Context) []bool { - v, err := bs.Bools(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*BuildQuery, *BuildSelect](ctx, bs.BuildQuery, bs, bs.inters, v) } -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (bs *BuildSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = bs.Bools(ctx); err != nil { - return +func (bs *BuildSelect) sqlScan(ctx context.Context, root *BuildQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(bs.fns)) + for _, fn := range bs.fns { + aggregation = append(aggregation, fn(selector)) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{build.Label} - default: - err = fmt.Errorf("ent: BuildSelect.Bools returned %d results when one was expected", len(v)) + switch n := len(*bs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (bs *BuildSelect) BoolX(ctx context.Context) bool { - v, err := bs.Bool(ctx) - if err != nil { - panic(err) - } - return v -} - -func (bs *BuildSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := bs.sql.Query() + query, args := selector.Query() if err := bs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/build_update.go b/ent/build_update.go index 2216ac14..49cfefc3 100755 --- a/ent/build_update.go +++ b/ent/build_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -46,6 +46,14 @@ func (bu *BuildUpdate) SetRevision(i int) *BuildUpdate { return bu } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (bu *BuildUpdate) SetNillableRevision(i *int) *BuildUpdate { + if i != nil { + bu.SetRevision(*i) + } + return bu +} + // AddRevision adds i to the "revision" field. func (bu *BuildUpdate) AddRevision(i int) *BuildUpdate { bu.mutation.AddRevision(i) @@ -59,6 +67,14 @@ func (bu *BuildUpdate) SetEnvironmentRevision(i int) *BuildUpdate { return bu } +// SetNillableEnvironmentRevision sets the "environment_revision" field if the given value is not nil. +func (bu *BuildUpdate) SetNillableEnvironmentRevision(i *int) *BuildUpdate { + if i != nil { + bu.SetEnvironmentRevision(*i) + } + return bu +} + // AddEnvironmentRevision adds i to the "environment_revision" field. func (bu *BuildUpdate) AddEnvironmentRevision(i int) *BuildUpdate { bu.mutation.AddEnvironmentRevision(i) @@ -453,40 +469,7 @@ func (bu *BuildUpdate) RemoveBuildToServerTasks(s ...*ServerTask) *BuildUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (bu *BuildUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(bu.hooks) == 0 { - if err = bu.check(); err != nil { - return 0, err - } - affected, err = bu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bu.check(); err != nil { - return 0, err - } - bu.mutation = mutation - affected, err = bu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(bu.hooks) - 1; i >= 0; i-- { - if bu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bu.sqlSave, bu.mutation, bu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -523,16 +506,10 @@ func (bu *BuildUpdate) check() error { } func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: build.Table, - Columns: build.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, - }, + if err := bu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(build.Table, build.Columns, sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID)) if ps := bu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -541,46 +518,22 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := bu.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldRevision, - }) + _spec.SetField(build.FieldRevision, field.TypeInt, value) } if value, ok := bu.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldRevision, - }) + _spec.AddField(build.FieldRevision, field.TypeInt, value) } if value, ok := bu.mutation.EnvironmentRevision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldEnvironmentRevision, - }) + _spec.SetField(build.FieldEnvironmentRevision, field.TypeInt, value) } if value, ok := bu.mutation.AddedEnvironmentRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldEnvironmentRevision, - }) + _spec.AddField(build.FieldEnvironmentRevision, field.TypeInt, value) } if value, ok := bu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: build.FieldVars, - }) + _spec.SetField(build.FieldVars, field.TypeJSON, value) } if value, ok := bu.mutation.CompletedPlan(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: build.FieldCompletedPlan, - }) + _spec.SetField(build.FieldCompletedPlan, field.TypeBool, value) } if bu.mutation.BuildToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -590,10 +543,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -606,10 +556,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -625,10 +572,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -641,10 +585,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -660,10 +601,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -676,10 +614,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -695,10 +630,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToLatestBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -711,10 +643,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToLatestBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -730,10 +659,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -746,10 +672,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ 
- Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -765,10 +688,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -781,10 +701,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -800,10 +717,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -819,10 +733,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -835,10 +746,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -854,10 +762,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -873,10 +778,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -889,10 +791,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -908,10 +807,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, 
field.TypeUUID), }, } for _, k := range nodes { @@ -927,10 +823,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -943,10 +836,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -962,10 +852,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -981,10 +868,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -997,10 +881,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1016,10 +897,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1035,10 +913,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1051,10 +926,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1070,10 +942,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ 
-1089,10 +958,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1105,10 +971,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1124,10 +987,7 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1139,10 +999,11 @@ func (bu *BuildUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{build.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + bu.mutation.done = true return n, nil } @@ -1161,6 +1022,14 @@ func (buo *BuildUpdateOne) SetRevision(i int) *BuildUpdateOne { return buo } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (buo *BuildUpdateOne) SetNillableRevision(i *int) *BuildUpdateOne { + if i != nil { + buo.SetRevision(*i) + } + return buo +} + // AddRevision adds i to the "revision" field. func (buo *BuildUpdateOne) AddRevision(i int) *BuildUpdateOne { buo.mutation.AddRevision(i) @@ -1174,6 +1043,14 @@ func (buo *BuildUpdateOne) SetEnvironmentRevision(i int) *BuildUpdateOne { return buo } +// SetNillableEnvironmentRevision sets the "environment_revision" field if the given value is not nil. +func (buo *BuildUpdateOne) SetNillableEnvironmentRevision(i *int) *BuildUpdateOne { + if i != nil { + buo.SetEnvironmentRevision(*i) + } + return buo +} + // AddEnvironmentRevision adds i to the "environment_revision" field. func (buo *BuildUpdateOne) AddEnvironmentRevision(i int) *BuildUpdateOne { buo.mutation.AddEnvironmentRevision(i) @@ -1566,6 +1443,12 @@ func (buo *BuildUpdateOne) RemoveBuildToServerTasks(s ...*ServerTask) *BuildUpda return buo.RemoveBuildToServerTaskIDs(ids...) } +// Where appends a list predicates to the BuildUpdate builder. +func (buo *BuildUpdateOne) Where(ps ...predicate.Build) *BuildUpdateOne { + buo.mutation.Where(ps...) + return buo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (buo *BuildUpdateOne) Select(field string, fields ...string) *BuildUpdateOne { @@ -1575,40 +1458,7 @@ func (buo *BuildUpdateOne) Select(field string, fields ...string) *BuildUpdateOn // Save executes the query and returns the updated Build entity. 
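// Both Save implementations in this file now delegate to withHooks, which wraps
// sqlSave with every registered hook, so existing hooks keep firing unchanged. A
// hedged sketch of a logging hook, assuming imports "context", "log" and
// "github.com/gen0cide/laforge/ent":
func registerBuildAudit(client *ent.Client) {
	client.Build.Use(func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			// withHooks(ctx, sqlSave, mutation, hooks) runs this mutator before
			// the underlying update executes.
			log.Printf("build mutation: op=%s fields=%v", m.Op(), m.Fields())
			return next.Mutate(ctx, m)
		})
	})
}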
func (buo *BuildUpdateOne) Save(ctx context.Context) (*Build, error) { - var ( - err error - node *Build - ) - if len(buo.hooks) == 0 { - if err = buo.check(); err != nil { - return nil, err - } - node, err = buo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = buo.check(); err != nil { - return nil, err - } - buo.mutation = mutation - node, err = buo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(buo.hooks) - 1; i >= 0; i-- { - if buo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = buo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, buo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, buo.sqlSave, buo.mutation, buo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -1645,16 +1495,10 @@ func (buo *BuildUpdateOne) check() error { } func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: build.Table, - Columns: build.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, - }, + if err := buo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(build.Table, build.Columns, sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID)) id, ok := buo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Build.id" for update`)} @@ -1680,46 +1524,22 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error } } if value, ok := buo.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldRevision, - }) + _spec.SetField(build.FieldRevision, field.TypeInt, value) } if value, ok := buo.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldRevision, - }) + _spec.AddField(build.FieldRevision, field.TypeInt, value) } if value, ok := buo.mutation.EnvironmentRevision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldEnvironmentRevision, - }) + _spec.SetField(build.FieldEnvironmentRevision, field.TypeInt, value) } if value, ok := buo.mutation.AddedEnvironmentRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: build.FieldEnvironmentRevision, - }) + _spec.AddField(build.FieldEnvironmentRevision, field.TypeInt, value) } if value, ok := buo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: build.FieldVars, - }) + _spec.SetField(build.FieldVars, field.TypeJSON, value) } if value, ok := buo.mutation.CompletedPlan(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: build.FieldCompletedPlan, - }) + _spec.SetField(build.FieldCompletedPlan, field.TypeBool, value) } if buo.mutation.BuildToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1729,10 +1549,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: 
[]string{build.BuildToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1745,10 +1562,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1764,10 +1578,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1780,10 +1591,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1799,10 +1607,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1815,10 +1620,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1834,10 +1636,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToLatestBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1850,10 +1649,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToLatestBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1869,10 +1665,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1885,10 +1678,7 @@ func (buo 
*BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1904,10 +1694,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1920,10 +1707,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1939,10 +1723,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1958,10 +1739,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1974,10 +1752,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1993,10 +1768,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2012,10 +1784,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2028,10 +1797,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2047,10 +1813,7 @@ func (buo 
*BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2066,10 +1829,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2082,10 +1842,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2101,10 +1858,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToBuildCommitsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2120,10 +1874,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2136,10 +1887,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2155,10 +1903,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToAdhocPlansColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2174,10 +1919,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2190,10 +1932,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2209,10 +1948,7 @@ func 
(buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToAgentStatusesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2228,10 +1964,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2244,10 +1977,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2263,10 +1993,7 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error Columns: []string{build.BuildToServerTasksColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2281,9 +2008,10 @@ func (buo *BuildUpdateOne) sqlSave(ctx context.Context) (_node *Build, err error if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{build.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + buo.mutation.done = true return _node, nil } diff --git a/ent/buildcommit.go b/ent/buildcommit.go index 69c64c40..c52bd279 100755 --- a/ent/buildcommit.go +++ b/ent/buildcommit.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/build" "github.com/gen0cide/laforge/ent/buildcommit" @@ -30,6 +31,7 @@ type BuildCommit struct { // The values are being populated by the BuildCommitQuery when eager-loading is set. Edges BuildCommitEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // BuildCommitToBuild holds the value of the BuildCommitToBuild edge. HCLBuildCommitToBuild *Build `json:"BuildCommitToBuild,omitempty"` @@ -37,8 +39,9 @@ type BuildCommit struct { HCLBuildCommitToServerTask []*ServerTask `json:"BuildCommitToServerTask,omitempty"` // BuildCommitToPlanDiffs holds the value of the BuildCommitToPlanDiffs edge. HCLBuildCommitToPlanDiffs []*PlanDiff `json:"BuildCommitToPlanDiffs,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ build_commit_build_commit_to_build *uuid.UUID + selectValues sql.SelectValues } // BuildCommitEdges holds the relations/edges for other nodes in the graph. @@ -52,6 +55,11 @@ type BuildCommitEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [3]bool + // totalCount holds the count of the edges above. 
+ totalCount [3]map[string]int + + namedBuildCommitToServerTask map[string][]*ServerTask + namedBuildCommitToPlanDiffs map[string][]*PlanDiff } // BuildCommitToBuildOrErr returns the BuildCommitToBuild value or an error if the edge @@ -59,8 +67,7 @@ type BuildCommitEdges struct { func (e BuildCommitEdges) BuildCommitToBuildOrErr() (*Build, error) { if e.loadedTypes[0] { if e.BuildCommitToBuild == nil { - // The edge BuildCommitToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.BuildCommitToBuild, nil @@ -87,8 +94,8 @@ func (e BuildCommitEdges) BuildCommitToPlanDiffsOrErr() ([]*PlanDiff, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*BuildCommit) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*BuildCommit) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case buildcommit.FieldRevision: @@ -102,7 +109,7 @@ func (*BuildCommit) scanValues(columns []string) ([]interface{}, error) { case buildcommit.ForeignKeys[0]: // build_commit_build_commit_to_build values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type BuildCommit", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -110,7 +117,7 @@ func (*BuildCommit) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the BuildCommit fields. -func (bc *BuildCommit) assignValues(columns []string, values []interface{}) error { +func (bc *BuildCommit) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -153,41 +160,49 @@ func (bc *BuildCommit) assignValues(columns []string, values []interface{}) erro bc.build_commit_build_commit_to_build = new(uuid.UUID) *bc.build_commit_build_commit_to_build = *value.S.(*uuid.UUID) } + default: + bc.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the BuildCommit. +// This includes values selected through modifiers, order, etc. +func (bc *BuildCommit) Value(name string) (ent.Value, error) { + return bc.selectValues.Get(name) +} + // QueryBuildCommitToBuild queries the "BuildCommitToBuild" edge of the BuildCommit entity. func (bc *BuildCommit) QueryBuildCommitToBuild() *BuildQuery { - return (&BuildCommitClient{config: bc.config}).QueryBuildCommitToBuild(bc) + return NewBuildCommitClient(bc.config).QueryBuildCommitToBuild(bc) } // QueryBuildCommitToServerTask queries the "BuildCommitToServerTask" edge of the BuildCommit entity. func (bc *BuildCommit) QueryBuildCommitToServerTask() *ServerTaskQuery { - return (&BuildCommitClient{config: bc.config}).QueryBuildCommitToServerTask(bc) + return NewBuildCommitClient(bc.config).QueryBuildCommitToServerTask(bc) } // QueryBuildCommitToPlanDiffs queries the "BuildCommitToPlanDiffs" edge of the BuildCommit entity. func (bc *BuildCommit) QueryBuildCommitToPlanDiffs() *PlanDiffQuery { - return (&BuildCommitClient{config: bc.config}).QueryBuildCommitToPlanDiffs(bc) + return NewBuildCommitClient(bc.config).QueryBuildCommitToPlanDiffs(bc) } // Update returns a builder for updating this BuildCommit. 
// Note that you need to call BuildCommit.Unwrap() before calling this method if this BuildCommit // was returned from a transaction, and the transaction was committed or rolled back. func (bc *BuildCommit) Update() *BuildCommitUpdateOne { - return (&BuildCommitClient{config: bc.config}).UpdateOne(bc) + return NewBuildCommitClient(bc.config).UpdateOne(bc) } // Unwrap unwraps the BuildCommit entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (bc *BuildCommit) Unwrap() *BuildCommit { - tx, ok := bc.config.driver.(*txDriver) + _tx, ok := bc.config.driver.(*txDriver) if !ok { panic("ent: BuildCommit is not a transactional entity") } - bc.config.driver = tx.drv + bc.config.driver = _tx.drv return bc } @@ -195,24 +210,69 @@ func (bc *BuildCommit) Unwrap() *BuildCommit { func (bc *BuildCommit) String() string { var builder strings.Builder builder.WriteString("BuildCommit(") - builder.WriteString(fmt.Sprintf("id=%v", bc.ID)) - builder.WriteString(", type=") + builder.WriteString(fmt.Sprintf("id=%v, ", bc.ID)) + builder.WriteString("type=") builder.WriteString(fmt.Sprintf("%v", bc.Type)) - builder.WriteString(", revision=") + builder.WriteString(", ") + builder.WriteString("revision=") builder.WriteString(fmt.Sprintf("%v", bc.Revision)) - builder.WriteString(", state=") + builder.WriteString(", ") + builder.WriteString("state=") builder.WriteString(fmt.Sprintf("%v", bc.State)) - builder.WriteString(", created_at=") + builder.WriteString(", ") + builder.WriteString("created_at=") builder.WriteString(bc.CreatedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } -// BuildCommits is a parsable slice of BuildCommit. -type BuildCommits []*BuildCommit +// NamedBuildCommitToServerTask returns the BuildCommitToServerTask named value or an error if the edge was not +// loaded in eager-loading with this name. +func (bc *BuildCommit) NamedBuildCommitToServerTask(name string) ([]*ServerTask, error) { + if bc.Edges.namedBuildCommitToServerTask == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := bc.Edges.namedBuildCommitToServerTask[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (bc BuildCommits) config(cfg config) { - for _i := range bc { - bc[_i].config = cfg +func (bc *BuildCommit) appendNamedBuildCommitToServerTask(name string, edges ...*ServerTask) { + if bc.Edges.namedBuildCommitToServerTask == nil { + bc.Edges.namedBuildCommitToServerTask = make(map[string][]*ServerTask) + } + if len(edges) == 0 { + bc.Edges.namedBuildCommitToServerTask[name] = []*ServerTask{} + } else { + bc.Edges.namedBuildCommitToServerTask[name] = append(bc.Edges.namedBuildCommitToServerTask[name], edges...) } } + +// NamedBuildCommitToPlanDiffs returns the BuildCommitToPlanDiffs named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (bc *BuildCommit) NamedBuildCommitToPlanDiffs(name string) ([]*PlanDiff, error) { + if bc.Edges.namedBuildCommitToPlanDiffs == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := bc.Edges.namedBuildCommitToPlanDiffs[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (bc *BuildCommit) appendNamedBuildCommitToPlanDiffs(name string, edges ...*PlanDiff) { + if bc.Edges.namedBuildCommitToPlanDiffs == nil { + bc.Edges.namedBuildCommitToPlanDiffs = make(map[string][]*PlanDiff) + } + if len(edges) == 0 { + bc.Edges.namedBuildCommitToPlanDiffs[name] = []*PlanDiff{} + } else { + bc.Edges.namedBuildCommitToPlanDiffs[name] = append(bc.Edges.namedBuildCommitToPlanDiffs[name], edges...) + } +} + +// BuildCommits is a parsable slice of BuildCommit. +type BuildCommits []*BuildCommit diff --git a/ent/buildcommit/buildcommit.go b/ent/buildcommit/buildcommit.go index b948c015..a6880563 100755 --- a/ent/buildcommit/buildcommit.go +++ b/ent/buildcommit/buildcommit.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package buildcommit @@ -8,6 +8,8 @@ import ( "strconv" "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -142,37 +144,121 @@ func StateValidator(s State) error { } } +// OrderOption defines the ordering options for the BuildCommit queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByRevision orders the results by the revision field. +func ByRevision(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevision, opts...).ToFunc() +} + +// ByState orders the results by the state field. +func ByState(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldState, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByBuildCommitToBuildField orders the results by BuildCommitToBuild field. +func ByBuildCommitToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildCommitToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByBuildCommitToServerTaskCount orders the results by BuildCommitToServerTask count. +func ByBuildCommitToServerTaskCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildCommitToServerTaskStep(), opts...) + } +} + +// ByBuildCommitToServerTask orders the results by BuildCommitToServerTask terms. +func ByBuildCommitToServerTask(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildCommitToServerTaskStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByBuildCommitToPlanDiffsCount orders the results by BuildCommitToPlanDiffs count. +func ByBuildCommitToPlanDiffsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newBuildCommitToPlanDiffsStep(), opts...) 
+ } +} + +// ByBuildCommitToPlanDiffs orders the results by BuildCommitToPlanDiffs terms. +func ByBuildCommitToPlanDiffs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newBuildCommitToPlanDiffsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newBuildCommitToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildCommitToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, BuildCommitToBuildTable, BuildCommitToBuildColumn), + ) +} +func newBuildCommitToServerTaskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildCommitToServerTaskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildCommitToServerTaskTable, BuildCommitToServerTaskColumn), + ) +} +func newBuildCommitToPlanDiffsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(BuildCommitToPlanDiffsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, BuildCommitToPlanDiffsTable, BuildCommitToPlanDiffsColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (_type Type) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(_type.String())) +func (e Type) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (_type *Type) UnmarshalGQL(val interface{}) error { +func (e *Type) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *_type = Type(str) - if err := TypeValidator(*_type); err != nil { + *e = Type(str) + if err := TypeValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Type", str) } return nil } // MarshalGQL implements graphql.Marshaler interface. -func (s State) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(s.String())) +func (e State) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (s *State) UnmarshalGQL(val interface{}) error { +func (e *State) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *s = State(str) - if err := StateValidator(*s); err != nil { + *e = State(str) + if err := StateValidator(*e); err != nil { return fmt.Errorf("%s is not a valid State", str) } return nil diff --git a/ent/buildcommit/where.go b/ent/buildcommit/where.go index 2e63b8d0..177899f3 100755 --- a/ent/buildcommit/where.go +++ b/ent/buildcommit/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package buildcommit @@ -13,347 +13,177 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
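// The OrderOption helpers added to package buildcommit above allow ordering by
// plain fields as well as by edge counts and edge terms. A hedged query sketch,
// assuming imports "context", "entgo.io/ent/dialect/sql",
// "github.com/gen0cide/laforge/ent" and "github.com/gen0cide/laforge/ent/buildcommit":
func latestCommits(ctx context.Context, client *ent.Client) ([]*ent.BuildCommit, error) {
	return client.BuildCommit.Query().
		Order(
			// Newest revision first, then commits carrying the most plan diffs.
			buildcommit.ByRevision(sql.OrderDesc()),
			buildcommit.ByBuildCommitToPlanDiffsCount(sql.OrderDesc()),
		).
		Limit(10).
		All(ctx)
}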
func IDNEQ(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.BuildCommit(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.BuildCommit(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.BuildCommit(sql.FieldLTE(FieldID, id)) } // Revision applies equality check predicate on the "revision" field. It's identical to RevisionEQ. func Revision(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldRevision, v)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldCreatedAt, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v Type) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v Type) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.BuildCommit(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. 
func TypeIn(vs ...Type) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.BuildCommit(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...Type) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.BuildCommit(sql.FieldNotIn(FieldType, vs...)) } // RevisionEQ applies the EQ predicate on the "revision" field. func RevisionEQ(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldRevision, v)) } // RevisionNEQ applies the NEQ predicate on the "revision" field. func RevisionNEQ(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldNEQ(FieldRevision, v)) } // RevisionIn applies the In predicate on the "revision" field. func RevisionIn(vs ...int) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRevision), v...)) - }) + return predicate.BuildCommit(sql.FieldIn(FieldRevision, vs...)) } // RevisionNotIn applies the NotIn predicate on the "revision" field. func RevisionNotIn(vs ...int) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRevision), v...)) - }) + return predicate.BuildCommit(sql.FieldNotIn(FieldRevision, vs...)) } // RevisionGT applies the GT predicate on the "revision" field. func RevisionGT(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldGT(FieldRevision, v)) } // RevisionGTE applies the GTE predicate on the "revision" field. func RevisionGTE(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldGTE(FieldRevision, v)) } // RevisionLT applies the LT predicate on the "revision" field. 
func RevisionLT(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldLT(FieldRevision, v)) } // RevisionLTE applies the LTE predicate on the "revision" field. func RevisionLTE(v int) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRevision), v)) - }) + return predicate.BuildCommit(sql.FieldLTE(FieldRevision, v)) } // StateEQ applies the EQ predicate on the "state" field. func StateEQ(v State) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldState), v)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldState, v)) } // StateNEQ applies the NEQ predicate on the "state" field. func StateNEQ(v State) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldState), v)) - }) + return predicate.BuildCommit(sql.FieldNEQ(FieldState, v)) } // StateIn applies the In predicate on the "state" field. func StateIn(vs ...State) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldState), v...)) - }) + return predicate.BuildCommit(sql.FieldIn(FieldState, vs...)) } // StateNotIn applies the NotIn predicate on the "state" field. func StateNotIn(vs ...State) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldState), v...)) - }) + return predicate.BuildCommit(sql.FieldNotIn(FieldState, vs...)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.BuildCommit(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
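// The predicates in this file now delegate to the sql.Field* helpers instead of
// building selectors inline; call sites stay the same. A hedged query sketch,
// assuming imports "context", "time", "github.com/gen0cide/laforge/ent" and
// "github.com/gen0cide/laforge/ent/buildcommit":
func recentCommits(ctx context.Context, client *ent.Client, since time.Time) ([]*ent.BuildCommit, error) {
	return client.BuildCommit.Query().
		Where(
			buildcommit.RevisionGTE(1),     // sql.FieldGTE under the hood
			buildcommit.CreatedAtGT(since), // sql.FieldGT under the hood
		).
		All(ctx)
}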
func CreatedAtNotIn(vs ...time.Time) predicate.BuildCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.BuildCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.BuildCommit(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.BuildCommit(sql.FieldLTE(FieldCreatedAt, v)) } // HasBuildCommitToBuild applies the HasEdge predicate on the "BuildCommitToBuild" edge. @@ -361,7 +191,6 @@ func HasBuildCommitToBuild() predicate.BuildCommit { return predicate.BuildCommit(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildCommitToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, BuildCommitToBuildTable, BuildCommitToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -371,11 +200,7 @@ func HasBuildCommitToBuild() predicate.BuildCommit { // HasBuildCommitToBuildWith applies the HasEdge predicate on the "BuildCommitToBuild" edge with a given conditions (other predicates). func HasBuildCommitToBuildWith(preds ...predicate.Build) predicate.BuildCommit { return predicate.BuildCommit(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildCommitToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, BuildCommitToBuildTable, BuildCommitToBuildColumn), - ) + step := newBuildCommitToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -389,7 +214,6 @@ func HasBuildCommitToServerTask() predicate.BuildCommit { return predicate.BuildCommit(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildCommitToServerTaskTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildCommitToServerTaskTable, BuildCommitToServerTaskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -399,11 +223,7 @@ func HasBuildCommitToServerTask() predicate.BuildCommit { // HasBuildCommitToServerTaskWith applies the HasEdge predicate on the "BuildCommitToServerTask" edge with a given conditions (other predicates). 
func HasBuildCommitToServerTaskWith(preds ...predicate.ServerTask) predicate.BuildCommit { return predicate.BuildCommit(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildCommitToServerTaskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildCommitToServerTaskTable, BuildCommitToServerTaskColumn), - ) + step := newBuildCommitToServerTaskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -417,7 +237,6 @@ func HasBuildCommitToPlanDiffs() predicate.BuildCommit { return predicate.BuildCommit(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildCommitToPlanDiffsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, BuildCommitToPlanDiffsTable, BuildCommitToPlanDiffsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -427,11 +246,7 @@ func HasBuildCommitToPlanDiffs() predicate.BuildCommit { // HasBuildCommitToPlanDiffsWith applies the HasEdge predicate on the "BuildCommitToPlanDiffs" edge with a given conditions (other predicates). func HasBuildCommitToPlanDiffsWith(preds ...predicate.PlanDiff) predicate.BuildCommit { return predicate.BuildCommit(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(BuildCommitToPlanDiffsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, BuildCommitToPlanDiffsTable, BuildCommitToPlanDiffsColumn), - ) + step := newBuildCommitToPlanDiffsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -442,32 +257,15 @@ func HasBuildCommitToPlanDiffsWith(preds ...predicate.PlanDiff) predicate.BuildC // And groups predicates with the AND operator between them. func And(predicates ...predicate.BuildCommit) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.BuildCommit(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.BuildCommit) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.BuildCommit(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.BuildCommit) predicate.BuildCommit { - return predicate.BuildCommit(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.BuildCommit(sql.NotPredicates(p)) } diff --git a/ent/buildcommit_create.go b/ent/buildcommit_create.go index ed389e3e..e12ccf03 100755 --- a/ent/buildcommit_create.go +++ b/ent/buildcommit_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -118,44 +118,8 @@ func (bcc *BuildCommitCreate) Mutation() *BuildCommitMutation { // Save creates the BuildCommit in the database. 
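The edge predicates now build their traversal step through the shared new*Step constructors, and And/Or/Not delegate to sql.AndPredicates, sql.OrPredicates and sql.NotPredicates; the generated SQL is equivalent. A sketch combining edge and boolean predicates, under the same client/ctx assumptions as the first sketch:

    // commitsWithDiffs returns commits that either have plan diffs attached or
    // have no associated server task; all predicate names come from the hunks above.
    func commitsWithDiffs(ctx context.Context, client *ent.Client) ([]*ent.BuildCommit, error) {
        return client.BuildCommit.Query().
            Where(
                buildcommit.Or(
                    buildcommit.HasBuildCommitToPlanDiffs(),
                    buildcommit.Not(buildcommit.HasBuildCommitToServerTask()),
                ),
            ).
            All(ctx)
    }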
func (bcc *BuildCommitCreate) Save(ctx context.Context) (*BuildCommit, error) { - var ( - err error - node *BuildCommit - ) bcc.defaults() - if len(bcc.hooks) == 0 { - if err = bcc.check(); err != nil { - return nil, err - } - node, err = bcc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bcc.check(); err != nil { - return nil, err - } - bcc.mutation = mutation - if node, err = bcc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(bcc.hooks) - 1; i >= 0; i-- { - if bcc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bcc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bcc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, bcc.sqlSave, bcc.mutation, bcc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -223,10 +187,13 @@ func (bcc *BuildCommitCreate) check() error { } func (bcc *BuildCommitCreate) sqlSave(ctx context.Context) (*BuildCommit, error) { + if err := bcc.check(); err != nil { + return nil, err + } _node, _spec := bcc.createSpec() if err := sqlgraph.CreateNode(ctx, bcc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -237,54 +204,34 @@ func (bcc *BuildCommitCreate) sqlSave(ctx context.Context) (*BuildCommit, error) return nil, err } } + bcc.mutation.id = &_node.ID + bcc.mutation.done = true return _node, nil } func (bcc *BuildCommitCreate) createSpec() (*BuildCommit, *sqlgraph.CreateSpec) { var ( _node = &BuildCommit{config: bcc.config} - _spec = &sqlgraph.CreateSpec{ - Table: buildcommit.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(buildcommit.Table, sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID)) ) if id, ok := bcc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := bcc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: buildcommit.FieldType, - }) + _spec.SetField(buildcommit.FieldType, field.TypeEnum, value) _node.Type = value } if value, ok := bcc.mutation.Revision(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: buildcommit.FieldRevision, - }) + _spec.SetField(buildcommit.FieldRevision, field.TypeInt, value) _node.Revision = value } if value, ok := bcc.mutation.State(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: buildcommit.FieldState, - }) + _spec.SetField(buildcommit.FieldState, field.TypeEnum, value) _node.State = value } if value, ok := bcc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: buildcommit.FieldCreatedAt, - }) + _spec.SetField(buildcommit.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } if nodes := bcc.mutation.BuildCommitToBuildIDs(); len(nodes) > 0 { @@ -295,10 +242,7 @@ func (bcc *BuildCommitCreate) createSpec() (*BuildCommit, *sqlgraph.CreateSpec) Columns: 
[]string{buildcommit.BuildCommitToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -315,10 +259,7 @@ func (bcc *BuildCommitCreate) createSpec() (*BuildCommit, *sqlgraph.CreateSpec) Columns: []string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -334,10 +275,7 @@ func (bcc *BuildCommitCreate) createSpec() (*BuildCommit, *sqlgraph.CreateSpec) Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -351,11 +289,15 @@ func (bcc *BuildCommitCreate) createSpec() (*BuildCommit, *sqlgraph.CreateSpec) // BuildCommitCreateBulk is the builder for creating many BuildCommit entities in bulk. type BuildCommitCreateBulk struct { config + err error builders []*BuildCommitCreate } // Save creates the BuildCommit entities in the database. func (bccb *BuildCommitCreateBulk) Save(ctx context.Context) ([]*BuildCommit, error) { + if bccb.err != nil { + return nil, bccb.err + } specs := make([]*sqlgraph.CreateSpec, len(bccb.builders)) nodes := make([]*BuildCommit, len(bccb.builders)) mutators := make([]Mutator, len(bccb.builders)) @@ -372,8 +314,8 @@ func (bccb *BuildCommitCreateBulk) Save(ctx context.Context) ([]*BuildCommit, er return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, bccb.builders[i+1].mutation) } else { @@ -381,7 +323,7 @@ func (bccb *BuildCommitCreateBulk) Save(ctx context.Context) ([]*BuildCommit, er // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, bccb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/buildcommit_delete.go b/ent/buildcommit_delete.go index dc4e8df6..84708a9d 100755 --- a/ent/buildcommit_delete.go +++ b/ent/buildcommit_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (bcd *BuildCommitDelete) Where(ps ...predicate.BuildCommit) *BuildCommitDel // Exec executes the deletion query and returns how many vertices were deleted. 
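In buildcommit_create.go the long hook-unrolling in Save is replaced by the shared withHooks helper, field specs move to _spec.SetField, and edge targets use sqlgraph.NewFieldSpec; buildcommit_delete.go (next hunks) gets the same treatment plus a Where method on the delete-one builder. Caller-side usage is unchanged. A sketch with clearly hypothetical enum values — TypeRebuild and StatePlanning are placeholders, not constants from this schema — and imports as in the first sketch plus github.com/google/uuid:

    // newCommit sketches the create builder; SetBuildCommitToBuildID is the standard
    // generated setter for the unique BuildCommitToBuild edge (assumed from the ent
    // template, not shown in this hunk).
    func newCommit(ctx context.Context, client *ent.Client, buildID uuid.UUID) (*ent.BuildCommit, error) {
        return client.BuildCommit.Create().
            SetType(buildcommit.TypeRebuild).    // hypothetical constant
            SetState(buildcommit.StatePlanning). // hypothetical constant
            SetRevision(1).
            SetCreatedAt(time.Now()).
            SetBuildCommitToBuildID(buildID).
            Save(ctx)
    }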
func (bcd *BuildCommitDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(bcd.hooks) == 0 { - affected, err = bcd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - bcd.mutation = mutation - affected, err = bcd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(bcd.hooks) - 1; i >= 0; i-- { - if bcd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bcd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bcd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bcd.sqlExec, bcd.mutation, bcd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (bcd *BuildCommitDelete) ExecX(ctx context.Context) int { } func (bcd *BuildCommitDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: buildcommit.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(buildcommit.Table, sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID)) if ps := bcd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (bcd *BuildCommitDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, bcd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, bcd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + bcd.mutation.done = true + return affected, err } // BuildCommitDeleteOne is the builder for deleting a single BuildCommit entity. @@ -92,6 +61,12 @@ type BuildCommitDeleteOne struct { bcd *BuildCommitDelete } +// Where appends a list predicates to the BuildCommitDelete builder. +func (bcdo *BuildCommitDeleteOne) Where(ps ...predicate.BuildCommit) *BuildCommitDeleteOne { + bcdo.bcd.mutation.Where(ps...) + return bcdo +} + // Exec executes the deletion query. func (bcdo *BuildCommitDeleteOne) Exec(ctx context.Context) error { n, err := bcdo.bcd.Exec(ctx) @@ -107,5 +82,7 @@ func (bcdo *BuildCommitDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (bcdo *BuildCommitDeleteOne) ExecX(ctx context.Context) { - bcdo.bcd.ExecX(ctx) + if err := bcdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/buildcommit_query.go b/ent/buildcommit_query.go index 2564523b..c001ae09 100755 --- a/ent/buildcommit_query.go +++ b/ent/buildcommit_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -23,17 +22,18 @@ import ( // BuildCommitQuery is the builder for querying BuildCommit entities. type BuildCommitQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.BuildCommit - // eager-loading edges. 
- withBuildCommitToBuild *BuildQuery - withBuildCommitToServerTask *ServerTaskQuery - withBuildCommitToPlanDiffs *PlanDiffQuery - withFKs bool + ctx *QueryContext + order []buildcommit.OrderOption + inters []Interceptor + predicates []predicate.BuildCommit + withBuildCommitToBuild *BuildQuery + withBuildCommitToServerTask *ServerTaskQuery + withBuildCommitToPlanDiffs *PlanDiffQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*BuildCommit) error + withNamedBuildCommitToServerTask map[string]*ServerTaskQuery + withNamedBuildCommitToPlanDiffs map[string]*PlanDiffQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -45,34 +45,34 @@ func (bcq *BuildCommitQuery) Where(ps ...predicate.BuildCommit) *BuildCommitQuer return bcq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (bcq *BuildCommitQuery) Limit(limit int) *BuildCommitQuery { - bcq.limit = &limit + bcq.ctx.Limit = &limit return bcq } -// Offset adds an offset step to the query. +// Offset to start from. func (bcq *BuildCommitQuery) Offset(offset int) *BuildCommitQuery { - bcq.offset = &offset + bcq.ctx.Offset = &offset return bcq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (bcq *BuildCommitQuery) Unique(unique bool) *BuildCommitQuery { - bcq.unique = &unique + bcq.ctx.Unique = &unique return bcq } -// Order adds an order step to the query. -func (bcq *BuildCommitQuery) Order(o ...OrderFunc) *BuildCommitQuery { +// Order specifies how the records should be ordered. +func (bcq *BuildCommitQuery) Order(o ...buildcommit.OrderOption) *BuildCommitQuery { bcq.order = append(bcq.order, o...) return bcq } // QueryBuildCommitToBuild chains the current query on the "BuildCommitToBuild" edge. func (bcq *BuildCommitQuery) QueryBuildCommitToBuild() *BuildQuery { - query := &BuildQuery{config: bcq.config} + query := (&BuildClient{config: bcq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bcq.prepareQuery(ctx); err != nil { return nil, err @@ -94,7 +94,7 @@ func (bcq *BuildCommitQuery) QueryBuildCommitToBuild() *BuildQuery { // QueryBuildCommitToServerTask chains the current query on the "BuildCommitToServerTask" edge. func (bcq *BuildCommitQuery) QueryBuildCommitToServerTask() *ServerTaskQuery { - query := &ServerTaskQuery{config: bcq.config} + query := (&ServerTaskClient{config: bcq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bcq.prepareQuery(ctx); err != nil { return nil, err @@ -116,7 +116,7 @@ func (bcq *BuildCommitQuery) QueryBuildCommitToServerTask() *ServerTaskQuery { // QueryBuildCommitToPlanDiffs chains the current query on the "BuildCommitToPlanDiffs" edge. func (bcq *BuildCommitQuery) QueryBuildCommitToPlanDiffs() *PlanDiffQuery { - query := &PlanDiffQuery{config: bcq.config} + query := (&PlanDiffClient{config: bcq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := bcq.prepareQuery(ctx); err != nil { return nil, err @@ -139,7 +139,7 @@ func (bcq *BuildCommitQuery) QueryBuildCommitToPlanDiffs() *PlanDiffQuery { // First returns the first BuildCommit entity from the query. // Returns a *NotFoundError when no BuildCommit was found. 
func (bcq *BuildCommitQuery) First(ctx context.Context) (*BuildCommit, error) { - nodes, err := bcq.Limit(1).All(ctx) + nodes, err := bcq.Limit(1).All(setContextOp(ctx, bcq.ctx, "First")) if err != nil { return nil, err } @@ -162,7 +162,7 @@ func (bcq *BuildCommitQuery) FirstX(ctx context.Context) *BuildCommit { // Returns a *NotFoundError when no BuildCommit ID was found. func (bcq *BuildCommitQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = bcq.Limit(1).IDs(ctx); err != nil { + if ids, err = bcq.Limit(1).IDs(setContextOp(ctx, bcq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -185,7 +185,7 @@ func (bcq *BuildCommitQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one BuildCommit entity is found. // Returns a *NotFoundError when no BuildCommit entities are found. func (bcq *BuildCommitQuery) Only(ctx context.Context) (*BuildCommit, error) { - nodes, err := bcq.Limit(2).All(ctx) + nodes, err := bcq.Limit(2).All(setContextOp(ctx, bcq.ctx, "Only")) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (bcq *BuildCommitQuery) OnlyX(ctx context.Context) *BuildCommit { // Returns a *NotFoundError when no entities are found. func (bcq *BuildCommitQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = bcq.Limit(2).IDs(ctx); err != nil { + if ids, err = bcq.Limit(2).IDs(setContextOp(ctx, bcq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -238,10 +238,12 @@ func (bcq *BuildCommitQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of BuildCommits. func (bcq *BuildCommitQuery) All(ctx context.Context) ([]*BuildCommit, error) { + ctx = setContextOp(ctx, bcq.ctx, "All") if err := bcq.prepareQuery(ctx); err != nil { return nil, err } - return bcq.sqlAll(ctx) + qr := querierAll[[]*BuildCommit, *BuildCommitQuery]() + return withInterceptors[[]*BuildCommit](ctx, bcq, qr, bcq.inters) } // AllX is like All, but panics if an error occurs. @@ -254,9 +256,12 @@ func (bcq *BuildCommitQuery) AllX(ctx context.Context) []*BuildCommit { } // IDs executes the query and returns a list of BuildCommit IDs. -func (bcq *BuildCommitQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := bcq.Select(buildcommit.FieldID).Scan(ctx, &ids); err != nil { +func (bcq *BuildCommitQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if bcq.ctx.Unique == nil && bcq.path != nil { + bcq.Unique(true) + } + ctx = setContextOp(ctx, bcq.ctx, "IDs") + if err = bcq.Select(buildcommit.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -273,10 +278,11 @@ func (bcq *BuildCommitQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (bcq *BuildCommitQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, bcq.ctx, "Count") if err := bcq.prepareQuery(ctx); err != nil { return 0, err } - return bcq.sqlCount(ctx) + return withInterceptors[int](ctx, bcq, querierCount[*BuildCommitQuery](), bcq.inters) } // CountX is like Count, but panics if an error occurs. @@ -290,10 +296,15 @@ func (bcq *BuildCommitQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
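The query terminals (First, Only, All, IDs, Count, Exist) keep their signatures; they are now wrapped with query interceptors and tag the context with the operation name, and Exist (next hunk) is reimplemented on top of FirstID instead of a COUNT(*). A sketch showing that call sites do not change:

    // commitStats shows the unchanged terminal API; Exist is now a FirstID probe
    // under the hood.
    func commitStats(ctx context.Context, client *ent.Client) (int, bool, error) {
        q := client.BuildCommit.Query().Where(buildcommit.RevisionGT(0))
        n, err := q.Clone().Count(ctx)
        if err != nil {
            return 0, false, err
        }
        ok, err := q.Exist(ctx)
        return n, ok, err
    }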
func (bcq *BuildCommitQuery) Exist(ctx context.Context) (bool, error) { - if err := bcq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, bcq.ctx, "Exist") + switch _, err := bcq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return bcq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -313,24 +324,23 @@ func (bcq *BuildCommitQuery) Clone() *BuildCommitQuery { } return &BuildCommitQuery{ config: bcq.config, - limit: bcq.limit, - offset: bcq.offset, - order: append([]OrderFunc{}, bcq.order...), + ctx: bcq.ctx.Clone(), + order: append([]buildcommit.OrderOption{}, bcq.order...), + inters: append([]Interceptor{}, bcq.inters...), predicates: append([]predicate.BuildCommit{}, bcq.predicates...), withBuildCommitToBuild: bcq.withBuildCommitToBuild.Clone(), withBuildCommitToServerTask: bcq.withBuildCommitToServerTask.Clone(), withBuildCommitToPlanDiffs: bcq.withBuildCommitToPlanDiffs.Clone(), // clone intermediate query. - sql: bcq.sql.Clone(), - path: bcq.path, - unique: bcq.unique, + sql: bcq.sql.Clone(), + path: bcq.path, } } // WithBuildCommitToBuild tells the query-builder to eager-load the nodes that are connected to // the "BuildCommitToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (bcq *BuildCommitQuery) WithBuildCommitToBuild(opts ...func(*BuildQuery)) *BuildCommitQuery { - query := &BuildQuery{config: bcq.config} + query := (&BuildClient{config: bcq.config}).Query() for _, opt := range opts { opt(query) } @@ -341,7 +351,7 @@ func (bcq *BuildCommitQuery) WithBuildCommitToBuild(opts ...func(*BuildQuery)) * // WithBuildCommitToServerTask tells the query-builder to eager-load the nodes that are connected to // the "BuildCommitToServerTask" edge. The optional arguments are used to configure the query builder of the edge. func (bcq *BuildCommitQuery) WithBuildCommitToServerTask(opts ...func(*ServerTaskQuery)) *BuildCommitQuery { - query := &ServerTaskQuery{config: bcq.config} + query := (&ServerTaskClient{config: bcq.config}).Query() for _, opt := range opts { opt(query) } @@ -352,7 +362,7 @@ func (bcq *BuildCommitQuery) WithBuildCommitToServerTask(opts ...func(*ServerTas // WithBuildCommitToPlanDiffs tells the query-builder to eager-load the nodes that are connected to // the "BuildCommitToPlanDiffs" edge. The optional arguments are used to configure the query builder of the edge. func (bcq *BuildCommitQuery) WithBuildCommitToPlanDiffs(opts ...func(*PlanDiffQuery)) *BuildCommitQuery { - query := &PlanDiffQuery{config: bcq.config} + query := (&PlanDiffClient{config: bcq.config}).Query() for _, opt := range opts { opt(query) } @@ -374,17 +384,13 @@ func (bcq *BuildCommitQuery) WithBuildCommitToPlanDiffs(opts ...func(*PlanDiffQu // GroupBy(buildcommit.FieldType). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (bcq *BuildCommitQuery) GroupBy(field string, fields ...string) *BuildCommitGroupBy { - group := &BuildCommitGroupBy{config: bcq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := bcq.prepareQuery(ctx); err != nil { - return nil, err - } - return bcq.sqlQuery(ctx), nil - } - return group + bcq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &BuildCommitGroupBy{build: bcq} + grbuild.flds = &bcq.ctx.Fields + grbuild.label = buildcommit.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -399,14 +405,31 @@ func (bcq *BuildCommitQuery) GroupBy(field string, fields ...string) *BuildCommi // client.BuildCommit.Query(). // Select(buildcommit.FieldType). // Scan(ctx, &v) -// func (bcq *BuildCommitQuery) Select(fields ...string) *BuildCommitSelect { - bcq.fields = append(bcq.fields, fields...) - return &BuildCommitSelect{BuildCommitQuery: bcq} + bcq.ctx.Fields = append(bcq.ctx.Fields, fields...) + sbuild := &BuildCommitSelect{BuildCommitQuery: bcq} + sbuild.label = buildcommit.Label + sbuild.flds, sbuild.scan = &bcq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BuildCommitSelect configured with the given aggregations. +func (bcq *BuildCommitQuery) Aggregate(fns ...AggregateFunc) *BuildCommitSelect { + return bcq.Select().Aggregate(fns...) } func (bcq *BuildCommitQuery) prepareQuery(ctx context.Context) error { - for _, f := range bcq.fields { + for _, inter := range bcq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, bcq); err != nil { + return err + } + } + } + for _, f := range bcq.ctx.Fields { if !buildcommit.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -421,7 +444,7 @@ func (bcq *BuildCommitQuery) prepareQuery(ctx context.Context) error { return nil } -func (bcq *BuildCommitQuery) sqlAll(ctx context.Context) ([]*BuildCommit, error) { +func (bcq *BuildCommitQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BuildCommit, error) { var ( nodes = []*BuildCommit{} withFKs = bcq.withFKs @@ -438,150 +461,189 @@ func (bcq *BuildCommitQuery) sqlAll(ctx context.Context) ([]*BuildCommit, error) if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, buildcommit.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*BuildCommit).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &BuildCommit{config: bcq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(bcq.modifiers) > 0 { + _spec.Modifiers = bcq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, bcq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := bcq.withBuildCommitToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*BuildCommit) - for i := range nodes { - if nodes[i].build_commit_build_commit_to_build == nil { - continue - } - fk := *nodes[i].build_commit_build_commit_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := bcq.loadBuildCommitToBuild(ctx, query, nodes, nil, + func(n *BuildCommit, e *Build) { n.Edges.BuildCommitToBuild = e }); err != nil { + return nil, err } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := bcq.withBuildCommitToServerTask; query != nil { + if err := bcq.loadBuildCommitToServerTask(ctx, query, nodes, + func(n *BuildCommit) { n.Edges.BuildCommitToServerTask = []*ServerTask{} }, + func(n *BuildCommit, e *ServerTask) { + n.Edges.BuildCommitToServerTask = append(n.Edges.BuildCommitToServerTask, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_commit_build_commit_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.BuildCommitToBuild = n - } + } + if query := bcq.withBuildCommitToPlanDiffs; query != nil { + if err := bcq.loadBuildCommitToPlanDiffs(ctx, query, nodes, + func(n *BuildCommit) { n.Edges.BuildCommitToPlanDiffs = []*PlanDiff{} }, + func(n *BuildCommit, e *PlanDiff) { + n.Edges.BuildCommitToPlanDiffs = append(n.Edges.BuildCommitToPlanDiffs, e) + }); err != nil { + return nil, err } } - - if query := bcq.withBuildCommitToServerTask; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*BuildCommit) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildCommitToServerTask = []*ServerTask{} + for name, query := range bcq.withNamedBuildCommitToServerTask { + if err := bcq.loadBuildCommitToServerTask(ctx, query, nodes, + func(n *BuildCommit) { n.appendNamedBuildCommitToServerTask(name) }, + func(n *BuildCommit, e *ServerTask) { n.appendNamedBuildCommitToServerTask(name, e) }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.InValues(buildcommit.BuildCommitToServerTaskColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range bcq.withNamedBuildCommitToPlanDiffs { + if err := bcq.loadBuildCommitToPlanDiffs(ctx, query, nodes, + func(n *BuildCommit) { n.appendNamedBuildCommitToPlanDiffs(name) }, 
+ func(n *BuildCommit, e *PlanDiff) { n.appendNamedBuildCommitToPlanDiffs(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.server_task_server_task_to_build_commit - if fk == nil { - return nil, fmt.Errorf(`foreign-key "server_task_server_task_to_build_commit" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_build_commit" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildCommitToServerTask = append(node.Edges.BuildCommitToServerTask, n) + } + for i := range bcq.loadTotal { + if err := bcq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := bcq.withBuildCommitToPlanDiffs; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*BuildCommit) +func (bcq *BuildCommitQuery) loadBuildCommitToBuild(ctx context.Context, query *BuildQuery, nodes []*BuildCommit, init func(*BuildCommit), assign func(*BuildCommit, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*BuildCommit) + for i := range nodes { + if nodes[i].build_commit_build_commit_to_build == nil { + continue + } + fk := *nodes[i].build_commit_build_commit_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "build_commit_build_commit_to_build" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.BuildCommitToPlanDiffs = []*PlanDiff{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.InValues(buildcommit.BuildCommitToPlanDiffsColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (bcq *BuildCommitQuery) loadBuildCommitToServerTask(ctx context.Context, query *ServerTaskQuery, nodes []*BuildCommit, init func(*BuildCommit), assign func(*BuildCommit, *ServerTask)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*BuildCommit) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - for _, n := range neighbors { - fk := n.plan_diff_plan_diff_to_build_commit - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_diff_plan_diff_to_build_commit" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_diff_plan_diff_to_build_commit" returned %v for node %v`, *fk, n.ID) - } - node.Edges.BuildCommitToPlanDiffs = append(node.Edges.BuildCommitToPlanDiffs, n) + } + query.withFKs = true + query.Where(predicate.ServerTask(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(buildcommit.BuildCommitToServerTaskColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.server_task_server_task_to_build_commit + if fk == nil { + return fmt.Errorf(`foreign-key "server_task_server_task_to_build_commit" is nil for node %v`, n.ID) } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key 
"server_task_server_task_to_build_commit" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - - return nodes, nil + return nil +} +func (bcq *BuildCommitQuery) loadBuildCommitToPlanDiffs(ctx context.Context, query *PlanDiffQuery, nodes []*BuildCommit, init func(*BuildCommit), assign func(*BuildCommit, *PlanDiff)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*BuildCommit) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.PlanDiff(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(buildcommit.BuildCommitToPlanDiffsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_diff_plan_diff_to_build_commit + if fk == nil { + return fmt.Errorf(`foreign-key "plan_diff_plan_diff_to_build_commit" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_diff_plan_diff_to_build_commit" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } func (bcq *BuildCommitQuery) sqlCount(ctx context.Context) (int, error) { _spec := bcq.querySpec() - _spec.Node.Columns = bcq.fields - if len(bcq.fields) > 0 { - _spec.Unique = bcq.unique != nil && *bcq.unique + if len(bcq.modifiers) > 0 { + _spec.Modifiers = bcq.modifiers } - return sqlgraph.CountNodes(ctx, bcq.driver, _spec) -} - -func (bcq *BuildCommitQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := bcq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = bcq.ctx.Fields + if len(bcq.ctx.Fields) > 0 { + _spec.Unique = bcq.ctx.Unique != nil && *bcq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, bcq.driver, _spec) } func (bcq *BuildCommitQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: buildcommit.Table, - Columns: buildcommit.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, - }, - From: bcq.sql, - Unique: true, - } - if unique := bcq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(buildcommit.Table, buildcommit.Columns, sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID)) + _spec.From = bcq.sql + if unique := bcq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if bcq.path != nil { + _spec.Unique = true } - if fields := bcq.fields; len(fields) > 0 { + if fields := bcq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, buildcommit.FieldID) for i := range fields { @@ -597,10 +659,10 @@ func (bcq *BuildCommitQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := bcq.limit; limit != nil { + if limit := bcq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := bcq.offset; offset != nil { + if offset := bcq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := bcq.order; len(ps) > 0 { @@ -616,7 +678,7 @@ func (bcq *BuildCommitQuery) querySpec() *sqlgraph.QuerySpec { func (bcq *BuildCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(bcq.driver.Dialect()) t1 := builder.Table(buildcommit.Table) - columns := bcq.fields + columns := bcq.ctx.Fields if len(columns) == 0 { columns = buildcommit.Columns } @@ -625,7 +687,7 @@ func (bcq *BuildCommitQuery) 
sqlQuery(ctx context.Context) *sql.Selector { selector = bcq.sql selector.Select(selector.Columns(columns...)...) } - if bcq.unique != nil && *bcq.unique { + if bcq.ctx.Unique != nil && *bcq.ctx.Unique { selector.Distinct() } for _, p := range bcq.predicates { @@ -634,498 +696,128 @@ func (bcq *BuildCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range bcq.order { p(selector) } - if offset := bcq.offset; offset != nil { + if offset := bcq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := bcq.limit; limit != nil { + if limit := bcq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// BuildCommitGroupBy is the group-by builder for BuildCommit entities. -type BuildCommitGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (bcgb *BuildCommitGroupBy) Aggregate(fns ...AggregateFunc) *BuildCommitGroupBy { - bcgb.fns = append(bcgb.fns, fns...) - return bcgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (bcgb *BuildCommitGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := bcgb.path(ctx) - if err != nil { - return err - } - bcgb.sql = query - return bcgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := bcgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(bcgb.fields) > 1 { - return nil, errors.New("ent: BuildCommitGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := bcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) StringsX(ctx context.Context) []string { - v, err := bcgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = bcgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) StringX(ctx context.Context) string { - v, err := bcgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (bcgb *BuildCommitGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(bcgb.fields) > 1 { - return nil, errors.New("ent: BuildCommitGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := bcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) IntsX(ctx context.Context) []int { - v, err := bcgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = bcgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) IntX(ctx context.Context) int { - v, err := bcgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(bcgb.fields) > 1 { - return nil, errors.New("ent: BuildCommitGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := bcgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedBuildCommitToServerTask tells the query-builder to eager-load the nodes that are connected to the "BuildCommitToServerTask" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (bcq *BuildCommitQuery) WithNamedBuildCommitToServerTask(name string, opts ...func(*ServerTaskQuery)) *BuildCommitQuery { + query := (&ServerTaskClient{config: bcq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := bcgb.Float64s(ctx) - if err != nil { - panic(err) + if bcq.withNamedBuildCommitToServerTask == nil { + bcq.withNamedBuildCommitToServerTask = make(map[string]*ServerTaskQuery) } - return v + bcq.withNamedBuildCommitToServerTask[name] = query + return bcq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = bcgb.Float64s(ctx); err != nil { - return +// WithNamedBuildCommitToPlanDiffs tells the query-builder to eager-load the nodes that are connected to the "BuildCommitToPlanDiffs" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (bcq *BuildCommitQuery) WithNamedBuildCommitToPlanDiffs(name string, opts ...func(*PlanDiffQuery)) *BuildCommitQuery { + query := (&PlanDiffClient{config: bcq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitGroupBy.Float64s returned %d results when one was expected", len(v)) + if bcq.withNamedBuildCommitToPlanDiffs == nil { + bcq.withNamedBuildCommitToPlanDiffs = make(map[string]*PlanDiffQuery) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) Float64X(ctx context.Context) float64 { - v, err := bcgb.Float64(ctx) - if err != nil { - panic(err) - } - return v + bcq.withNamedBuildCommitToPlanDiffs[name] = query + return bcq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(bcgb.fields) > 1 { - return nil, errors.New("ent: BuildCommitGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := bcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// BuildCommitGroupBy is the group-by builder for BuildCommit entities. +type BuildCommitGroupBy struct { + selector + build *BuildCommitQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (bcgb *BuildCommitGroupBy) BoolsX(ctx context.Context) []bool { - v, err := bcgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (bcgb *BuildCommitGroupBy) Aggregate(fns ...AggregateFunc) *BuildCommitGroupBy { + bcgb.fns = append(bcgb.fns, fns...) + return bcgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (bcgb *BuildCommitGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = bcgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (bcgb *BuildCommitGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, bcgb.build.ctx, "GroupBy") + if err := bcgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*BuildCommitQuery, *BuildCommitGroupBy](ctx, bcgb.build, bcgb, bcgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. 
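The regeneration also adds named eager-loading (WithNamedBuildCommitToServerTask / WithNamedBuildCommitToPlanDiffs), which lets the same edge be loaded several times under different keys. A sketch, assuming the NamedBuildCommitToPlanDiffs accessor that ent generates alongside the appendNamed… helpers used in the loaders above (not part of this hunk), with imports as in the earlier sketches plus github.com/google/uuid:

    // commitDiffView eager-loads the plan-diffs edge under a named key and reads
    // it back through the matching named accessor.
    func commitDiffView(ctx context.Context, client *ent.Client, id uuid.UUID) ([]*ent.PlanDiff, error) {
        bc, err := client.BuildCommit.Query().
            Where(buildcommit.IDEQ(id)).
            WithNamedBuildCommitToPlanDiffs("latest", func(q *ent.PlanDiffQuery) {
                q.Limit(1)
            }).
            Only(ctx)
        if err != nil {
            return nil, err
        }
        return bc.NamedBuildCommitToPlanDiffs("latest")
    }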
-func (bcgb *BuildCommitGroupBy) BoolX(ctx context.Context) bool { - v, err := bcgb.Bool(ctx) - if err != nil { - panic(err) +func (bcgb *BuildCommitGroupBy) sqlScan(ctx context.Context, root *BuildCommitQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(bcgb.fns)) + for _, fn := range bcgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (bcgb *BuildCommitGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range bcgb.fields { - if !buildcommit.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*bcgb.flds)+len(bcgb.fns)) + for _, f := range *bcgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := bcgb.sqlQuery() + selector.GroupBy(selector.Columns(*bcgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := bcgb.driver.Query(ctx, query, args, rows); err != nil { + if err := bcgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (bcgb *BuildCommitGroupBy) sqlQuery() *sql.Selector { - selector := bcgb.sql.Select() - aggregation := make([]string, 0, len(bcgb.fns)) - for _, fn := range bcgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(bcgb.fields)+len(bcgb.fns)) - for _, f := range bcgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(bcgb.fields...)...) -} - // BuildCommitSelect is the builder for selecting fields of BuildCommit entities. type BuildCommitSelect struct { *BuildCommitQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (bcs *BuildCommitSelect) Aggregate(fns ...AggregateFunc) *BuildCommitSelect { + bcs.fns = append(bcs.fns, fns...) + return bcs } // Scan applies the selector query and scans the result into the given value. -func (bcs *BuildCommitSelect) Scan(ctx context.Context, v interface{}) error { +func (bcs *BuildCommitSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, bcs.ctx, "Select") if err := bcs.prepareQuery(ctx); err != nil { return err } - bcs.sql = bcs.BuildCommitQuery.sqlQuery(ctx) - return bcs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (bcs *BuildCommitSelect) ScanX(ctx context.Context, v interface{}) { - if err := bcs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Strings(ctx context.Context) ([]string, error) { - if len(bcs.fields) > 1 { - return nil, errors.New("ent: BuildCommitSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := bcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (bcs *BuildCommitSelect) StringsX(ctx context.Context) []string { - v, err := bcs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = bcs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (bcs *BuildCommitSelect) StringX(ctx context.Context) string { - v, err := bcs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Ints(ctx context.Context) ([]int, error) { - if len(bcs.fields) > 1 { - return nil, errors.New("ent: BuildCommitSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := bcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (bcs *BuildCommitSelect) IntsX(ctx context.Context) []int { - v, err := bcs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = bcs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitSelect.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*BuildCommitQuery, *BuildCommitSelect](ctx, bcs.BuildCommitQuery, bcs, bcs.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (bcs *BuildCommitSelect) IntX(ctx context.Context) int { - v, err := bcs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(bcs.fields) > 1 { - return nil, errors.New("ent: BuildCommitSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := bcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (bcs *BuildCommitSelect) Float64sX(ctx context.Context) []float64 { - v, err := bcs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = bcs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
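The large per-type helpers (Strings, Ints, Float64s, Bools and their X variants) removed here are not lost: they now come from the embedded selector type instead of being regenerated per entity, and BuildCommitSelect gains an Aggregate method. The group-by pattern from the generated doc comment still applies; a sketch:

    // commitsPerState groups commits by state and counts them, mirroring the usage
    // shown in the generated GroupBy doc comment.
    func commitsPerState(ctx context.Context, client *ent.Client) error {
        var rows []struct {
            State string `json:"state"`
            Count int    `json:"count"`
        }
        return client.BuildCommit.Query().
            GroupBy(buildcommit.FieldState).
            Aggregate(ent.Count()).
            Scan(ctx, &rows)
    }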
-func (bcs *BuildCommitSelect) Float64X(ctx context.Context) float64 { - v, err := bcs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Bools(ctx context.Context) ([]bool, error) { - if len(bcs.fields) > 1 { - return nil, errors.New("ent: BuildCommitSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := bcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (bcs *BuildCommitSelect) BoolsX(ctx context.Context) []bool { - v, err := bcs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (bcs *BuildCommitSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = bcs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{buildcommit.Label} - default: - err = fmt.Errorf("ent: BuildCommitSelect.Bools returned %d results when one was expected", len(v)) +func (bcs *BuildCommitSelect) sqlScan(ctx context.Context, root *BuildCommitQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(bcs.fns)) + for _, fn := range bcs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (bcs *BuildCommitSelect) BoolX(ctx context.Context) bool { - v, err := bcs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*bcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (bcs *BuildCommitSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := bcs.sql.Query() + query, args := selector.Query() if err := bcs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/buildcommit_update.go b/ent/buildcommit_update.go index 4d698e98..e59b442b 100755 --- a/ent/buildcommit_update.go +++ b/ent/buildcommit_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -38,6 +38,14 @@ func (bcu *BuildCommitUpdate) SetType(b buildcommit.Type) *BuildCommitUpdate { return bcu } +// SetNillableType sets the "type" field if the given value is not nil. +func (bcu *BuildCommitUpdate) SetNillableType(b *buildcommit.Type) *BuildCommitUpdate { + if b != nil { + bcu.SetType(*b) + } + return bcu +} + // SetRevision sets the "revision" field. func (bcu *BuildCommitUpdate) SetRevision(i int) *BuildCommitUpdate { bcu.mutation.ResetRevision() @@ -45,6 +53,14 @@ func (bcu *BuildCommitUpdate) SetRevision(i int) *BuildCommitUpdate { return bcu } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (bcu *BuildCommitUpdate) SetNillableRevision(i *int) *BuildCommitUpdate { + if i != nil { + bcu.SetRevision(*i) + } + return bcu +} + // AddRevision adds i to the "revision" field. func (bcu *BuildCommitUpdate) AddRevision(i int) *BuildCommitUpdate { bcu.mutation.AddRevision(i) @@ -57,6 +73,14 @@ func (bcu *BuildCommitUpdate) SetState(b buildcommit.State) *BuildCommitUpdate { return bcu } +// SetNillableState sets the "state" field if the given value is not nil. 
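buildcommit_update.go follows the same pattern in the hunks that follow (withHooks, SetField/AddField, NewFieldSpec) and additionally gains SetNillable* setters, which make optional, PATCH-style updates less verbose. A sketch, assuming the values arrive as pointers from an API payload and that the update builder exposes the usual generated Where method:

    // patchCommits applies only the fields that were actually supplied; a nil
    // pointer turns the corresponding SetNillable* call into a no-op.
    func patchCommits(ctx context.Context, client *ent.Client, rev *int, state *buildcommit.State) (int, error) {
        return client.BuildCommit.Update().
            Where(buildcommit.RevisionGT(0)).
            SetNillableRevision(rev).
            SetNillableState(state).
            Save(ctx)
    }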
+func (bcu *BuildCommitUpdate) SetNillableState(b *buildcommit.State) *BuildCommitUpdate { + if b != nil { + bcu.SetState(*b) + } + return bcu +} + // SetCreatedAt sets the "created_at" field. func (bcu *BuildCommitUpdate) SetCreatedAt(t time.Time) *BuildCommitUpdate { bcu.mutation.SetCreatedAt(t) @@ -167,40 +191,7 @@ func (bcu *BuildCommitUpdate) RemoveBuildCommitToPlanDiffs(p ...*PlanDiff) *Buil // Save executes the query and returns the number of nodes affected by the update operation. func (bcu *BuildCommitUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(bcu.hooks) == 0 { - if err = bcu.check(); err != nil { - return 0, err - } - affected, err = bcu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bcu.check(); err != nil { - return 0, err - } - bcu.mutation = mutation - affected, err = bcu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(bcu.hooks) - 1; i >= 0; i-- { - if bcu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bcu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bcu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bcu.sqlSave, bcu.mutation, bcu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -244,16 +235,10 @@ func (bcu *BuildCommitUpdate) check() error { } func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: buildcommit.Table, - Columns: buildcommit.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, - }, + if err := bcu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(buildcommit.Table, buildcommit.Columns, sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID)) if ps := bcu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -262,39 +247,19 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := bcu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: buildcommit.FieldType, - }) + _spec.SetField(buildcommit.FieldType, field.TypeEnum, value) } if value, ok := bcu.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: buildcommit.FieldRevision, - }) + _spec.SetField(buildcommit.FieldRevision, field.TypeInt, value) } if value, ok := bcu.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: buildcommit.FieldRevision, - }) + _spec.AddField(buildcommit.FieldRevision, field.TypeInt, value) } if value, ok := bcu.mutation.State(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: buildcommit.FieldState, - }) + _spec.SetField(buildcommit.FieldState, field.TypeEnum, value) } if value, ok := bcu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: buildcommit.FieldCreatedAt, - }) + 
_spec.SetField(buildcommit.FieldCreatedAt, field.TypeTime, value) } if bcu.mutation.BuildCommitToBuildCleared() { edge := &sqlgraph.EdgeSpec{ @@ -304,10 +269,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -320,10 +282,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -339,10 +298,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -355,10 +311,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -374,10 +327,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -393,10 +343,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -409,10 +356,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -428,10 +372,7 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -443,10 +384,11 @@ func (bcu *BuildCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{buildcommit.Label} } else if 
sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + bcu.mutation.done = true return n, nil } @@ -464,6 +406,14 @@ func (bcuo *BuildCommitUpdateOne) SetType(b buildcommit.Type) *BuildCommitUpdate return bcuo } +// SetNillableType sets the "type" field if the given value is not nil. +func (bcuo *BuildCommitUpdateOne) SetNillableType(b *buildcommit.Type) *BuildCommitUpdateOne { + if b != nil { + bcuo.SetType(*b) + } + return bcuo +} + // SetRevision sets the "revision" field. func (bcuo *BuildCommitUpdateOne) SetRevision(i int) *BuildCommitUpdateOne { bcuo.mutation.ResetRevision() @@ -471,6 +421,14 @@ func (bcuo *BuildCommitUpdateOne) SetRevision(i int) *BuildCommitUpdateOne { return bcuo } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (bcuo *BuildCommitUpdateOne) SetNillableRevision(i *int) *BuildCommitUpdateOne { + if i != nil { + bcuo.SetRevision(*i) + } + return bcuo +} + // AddRevision adds i to the "revision" field. func (bcuo *BuildCommitUpdateOne) AddRevision(i int) *BuildCommitUpdateOne { bcuo.mutation.AddRevision(i) @@ -483,6 +441,14 @@ func (bcuo *BuildCommitUpdateOne) SetState(b buildcommit.State) *BuildCommitUpda return bcuo } +// SetNillableState sets the "state" field if the given value is not nil. +func (bcuo *BuildCommitUpdateOne) SetNillableState(b *buildcommit.State) *BuildCommitUpdateOne { + if b != nil { + bcuo.SetState(*b) + } + return bcuo +} + // SetCreatedAt sets the "created_at" field. func (bcuo *BuildCommitUpdateOne) SetCreatedAt(t time.Time) *BuildCommitUpdateOne { bcuo.mutation.SetCreatedAt(t) @@ -591,6 +557,12 @@ func (bcuo *BuildCommitUpdateOne) RemoveBuildCommitToPlanDiffs(p ...*PlanDiff) * return bcuo.RemoveBuildCommitToPlanDiffIDs(ids...) } +// Where appends a list predicates to the BuildCommitUpdate builder. +func (bcuo *BuildCommitUpdateOne) Where(ps ...predicate.BuildCommit) *BuildCommitUpdateOne { + bcuo.mutation.Where(ps...) + return bcuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (bcuo *BuildCommitUpdateOne) Select(field string, fields ...string) *BuildCommitUpdateOne { @@ -600,40 +572,7 @@ func (bcuo *BuildCommitUpdateOne) Select(field string, fields ...string) *BuildC // Save executes the query and returns the updated BuildCommit entity. func (bcuo *BuildCommitUpdateOne) Save(ctx context.Context) (*BuildCommit, error) { - var ( - err error - node *BuildCommit - ) - if len(bcuo.hooks) == 0 { - if err = bcuo.check(); err != nil { - return nil, err - } - node, err = bcuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BuildCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bcuo.check(); err != nil { - return nil, err - } - bcuo.mutation = mutation - node, err = bcuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(bcuo.hooks) - 1; i >= 0; i-- { - if bcuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bcuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bcuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, bcuo.sqlSave, bcuo.mutation, bcuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
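
Editorial aside (not part of the generated diff): the regenerated update builders above replace the hand-rolled hook loop with ent's withHooks runtime helper, and BuildCommitUpdateOne now exposes a Where(...) method. A minimal, hedged usage sketch under assumed names — "client", "ctx", and "id" are illustrative, not from this diff; the predicate helper RevisionGT is the standard ent-generated predicate for the int "revision" field:

	import (
		"context"

		"github.com/gen0cide/laforge/ent"
		"github.com/gen0cide/laforge/ent/buildcommit"
		"github.com/google/uuid"
	)

	// bumpRevision bumps the revision of a single BuildCommit, but only if it
	// already has at least one revision, using the regenerated builder API.
	func bumpRevision(ctx context.Context, client *ent.Client, id uuid.UUID) (*ent.BuildCommit, error) {
		return client.BuildCommit.
			UpdateOneID(id).
			Where(buildcommit.RevisionGT(0)). // Where(...) on UpdateOne is new in this regeneration
			AddRevision(1).
			Save(ctx) // Save now routes through the withHooks runtime helper
	}

Any hooks registered via client.BuildCommit.Use(...) still run around sqlSave exactly as before; only the boilerplate that invoked them was collapsed by the code generator.
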
@@ -677,16 +616,10 @@ func (bcuo *BuildCommitUpdateOne) check() error { } func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildCommit, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: buildcommit.Table, - Columns: buildcommit.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, - }, + if err := bcuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(buildcommit.Table, buildcommit.Columns, sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID)) id, ok := bcuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BuildCommit.id" for update`)} @@ -712,39 +645,19 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm } } if value, ok := bcuo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: buildcommit.FieldType, - }) + _spec.SetField(buildcommit.FieldType, field.TypeEnum, value) } if value, ok := bcuo.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: buildcommit.FieldRevision, - }) + _spec.SetField(buildcommit.FieldRevision, field.TypeInt, value) } if value, ok := bcuo.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: buildcommit.FieldRevision, - }) + _spec.AddField(buildcommit.FieldRevision, field.TypeInt, value) } if value, ok := bcuo.mutation.State(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: buildcommit.FieldState, - }) + _spec.SetField(buildcommit.FieldState, field.TypeEnum, value) } if value, ok := bcuo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: buildcommit.FieldCreatedAt, - }) + _spec.SetField(buildcommit.FieldCreatedAt, field.TypeTime, value) } if bcuo.mutation.BuildCommitToBuildCleared() { edge := &sqlgraph.EdgeSpec{ @@ -754,10 +667,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -770,10 +680,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -789,10 +696,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -805,10 +709,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: 
[]string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -824,10 +725,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -843,10 +741,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -859,10 +754,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -878,10 +770,7 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm Columns: []string{buildcommit.BuildCommitToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -896,9 +785,10 @@ func (bcuo *BuildCommitUpdateOne) sqlSave(ctx context.Context) (_node *BuildComm if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{buildcommit.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + bcuo.mutation.done = true return _node, nil } diff --git a/ent/client.go b/ent/client.go index 3ea27c63..3adc4938 100755 --- a/ent/client.go +++ b/ent/client.go @@ -1,15 +1,21 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" + "errors" "fmt" "log" + "reflect" "github.com/gen0cide/laforge/ent/migrate" "github.com/google/uuid" + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/gen0cide/laforge/ent/adhocplan" "github.com/gen0cide/laforge/ent/agentstatus" "github.com/gen0cide/laforge/ent/agenttask" @@ -47,10 +53,6 @@ import ( "github.com/gen0cide/laforge/ent/team" "github.com/gen0cide/laforge/ent/token" "github.com/gen0cide/laforge/ent/user" - - "entgo.io/ent/dialect" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" ) // Client is the client that holds all ent builders. @@ -136,9 +138,7 @@ type Client struct { // NewClient creates a new client configured with the given options. func NewClient(opts ...Option) *Client { - cfg := config{log: log.Println, hooks: &hooks{}} - cfg.options(opts...) 
- client := &Client{config: cfg} + client := &Client{config: newConfig(opts...)} client.init() return client } @@ -184,6 +184,62 @@ func (c *Client) init() { c.User = NewUserClient(c.config) } +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + // Open opens a database/sql.DB specified by the driver name and // the data source name, and returns a new client attached to it. // Optional parameters can be added for configuring the client. @@ -200,11 +256,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error) } } +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + // Tx returns a new transactional client. The provided context // is used until the transaction is committed or rolled back. func (c *Client) Tx(ctx context.Context) (*Tx, error) { if _, ok := c.driver.(*txDriver); ok { - return nil, fmt.Errorf("ent: cannot start a transaction within a transaction") + return nil, ErrTxStarted } tx, err := newTx(ctx, c.driver) if err != nil { @@ -258,7 +317,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { // BeginTx returns a transactional client with specified options. func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { if _, ok := c.driver.(*txDriver); ok { - return nil, fmt.Errorf("ent: cannot start a transaction within a transaction") + return nil, errors.New("ent: cannot start a transaction within a transaction") } tx, err := c.driver.(interface { BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) @@ -317,7 +376,6 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) // AdhocPlan. // Query(). // Count(ctx) -// func (c *Client) Debug() *Client { if c.debug { return c @@ -337,43 +395,115 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { - c.AdhocPlan.Use(hooks...) - c.AgentStatus.Use(hooks...) - c.AgentTask.Use(hooks...) - c.Ansible.Use(hooks...) - c.AuthUser.Use(hooks...) - c.Build.Use(hooks...) - c.BuildCommit.Use(hooks...) - c.Command.Use(hooks...) - c.Competition.Use(hooks...) 
- c.DNS.Use(hooks...) - c.DNSRecord.Use(hooks...) - c.Disk.Use(hooks...) - c.Environment.Use(hooks...) - c.FileDelete.Use(hooks...) - c.FileDownload.Use(hooks...) - c.FileExtract.Use(hooks...) - c.Finding.Use(hooks...) - c.GinFileMiddleware.Use(hooks...) - c.Host.Use(hooks...) - c.HostDependency.Use(hooks...) - c.Identity.Use(hooks...) - c.IncludedNetwork.Use(hooks...) - c.Network.Use(hooks...) - c.Plan.Use(hooks...) - c.PlanDiff.Use(hooks...) - c.ProvisionedHost.Use(hooks...) - c.ProvisionedNetwork.Use(hooks...) - c.ProvisioningStep.Use(hooks...) - c.RepoCommit.Use(hooks...) - c.Repository.Use(hooks...) - c.Script.Use(hooks...) - c.ServerTask.Use(hooks...) - c.Status.Use(hooks...) - c.Tag.Use(hooks...) - c.Team.Use(hooks...) - c.Token.Use(hooks...) - c.User.Use(hooks...) + for _, n := range []interface{ Use(...Hook) }{ + c.AdhocPlan, c.AgentStatus, c.AgentTask, c.Ansible, c.AuthUser, c.Build, + c.BuildCommit, c.Command, c.Competition, c.DNS, c.DNSRecord, c.Disk, + c.Environment, c.FileDelete, c.FileDownload, c.FileExtract, c.Finding, + c.GinFileMiddleware, c.Host, c.HostDependency, c.Identity, c.IncludedNetwork, + c.Network, c.Plan, c.PlanDiff, c.ProvisionedHost, c.ProvisionedNetwork, + c.ProvisioningStep, c.RepoCommit, c.Repository, c.Script, c.ServerTask, + c.Status, c.Tag, c.Team, c.Token, c.User, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.AdhocPlan, c.AgentStatus, c.AgentTask, c.Ansible, c.AuthUser, c.Build, + c.BuildCommit, c.Command, c.Competition, c.DNS, c.DNSRecord, c.Disk, + c.Environment, c.FileDelete, c.FileDownload, c.FileExtract, c.Finding, + c.GinFileMiddleware, c.Host, c.HostDependency, c.Identity, c.IncludedNetwork, + c.Network, c.Plan, c.PlanDiff, c.ProvisionedHost, c.ProvisionedNetwork, + c.ProvisioningStep, c.RepoCommit, c.Repository, c.Script, c.ServerTask, + c.Status, c.Tag, c.Team, c.Token, c.User, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. 
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *AdhocPlanMutation: + return c.AdhocPlan.mutate(ctx, m) + case *AgentStatusMutation: + return c.AgentStatus.mutate(ctx, m) + case *AgentTaskMutation: + return c.AgentTask.mutate(ctx, m) + case *AnsibleMutation: + return c.Ansible.mutate(ctx, m) + case *AuthUserMutation: + return c.AuthUser.mutate(ctx, m) + case *BuildMutation: + return c.Build.mutate(ctx, m) + case *BuildCommitMutation: + return c.BuildCommit.mutate(ctx, m) + case *CommandMutation: + return c.Command.mutate(ctx, m) + case *CompetitionMutation: + return c.Competition.mutate(ctx, m) + case *DNSMutation: + return c.DNS.mutate(ctx, m) + case *DNSRecordMutation: + return c.DNSRecord.mutate(ctx, m) + case *DiskMutation: + return c.Disk.mutate(ctx, m) + case *EnvironmentMutation: + return c.Environment.mutate(ctx, m) + case *FileDeleteMutation: + return c.FileDelete.mutate(ctx, m) + case *FileDownloadMutation: + return c.FileDownload.mutate(ctx, m) + case *FileExtractMutation: + return c.FileExtract.mutate(ctx, m) + case *FindingMutation: + return c.Finding.mutate(ctx, m) + case *GinFileMiddlewareMutation: + return c.GinFileMiddleware.mutate(ctx, m) + case *HostMutation: + return c.Host.mutate(ctx, m) + case *HostDependencyMutation: + return c.HostDependency.mutate(ctx, m) + case *IdentityMutation: + return c.Identity.mutate(ctx, m) + case *IncludedNetworkMutation: + return c.IncludedNetwork.mutate(ctx, m) + case *NetworkMutation: + return c.Network.mutate(ctx, m) + case *PlanMutation: + return c.Plan.mutate(ctx, m) + case *PlanDiffMutation: + return c.PlanDiff.mutate(ctx, m) + case *ProvisionedHostMutation: + return c.ProvisionedHost.mutate(ctx, m) + case *ProvisionedNetworkMutation: + return c.ProvisionedNetwork.mutate(ctx, m) + case *ProvisioningStepMutation: + return c.ProvisioningStep.mutate(ctx, m) + case *RepoCommitMutation: + return c.RepoCommit.mutate(ctx, m) + case *RepositoryMutation: + return c.Repository.mutate(ctx, m) + case *ScriptMutation: + return c.Script.mutate(ctx, m) + case *ServerTaskMutation: + return c.ServerTask.mutate(ctx, m) + case *StatusMutation: + return c.Status.mutate(ctx, m) + case *TagMutation: + return c.Tag.mutate(ctx, m) + case *TeamMutation: + return c.Team.mutate(ctx, m) + case *TokenMutation: + return c.Token.mutate(ctx, m) + case *UserMutation: + return c.User.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } } // AdhocPlanClient is a client for the AdhocPlan schema. @@ -392,7 +522,13 @@ func (c *AdhocPlanClient) Use(hooks ...Hook) { c.hooks.AdhocPlan = append(c.hooks.AdhocPlan, hooks...) } -// Create returns a create builder for AdhocPlan. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `adhocplan.Intercept(f(g(h())))`. +func (c *AdhocPlanClient) Intercept(interceptors ...Interceptor) { + c.inters.AdhocPlan = append(c.inters.AdhocPlan, interceptors...) +} + +// Create returns a builder for creating a AdhocPlan entity. func (c *AdhocPlanClient) Create() *AdhocPlanCreate { mutation := newAdhocPlanMutation(c.config, OpCreate) return &AdhocPlanCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -403,6 +539,21 @@ func (c *AdhocPlanClient) CreateBulk(builders ...*AdhocPlanCreate) *AdhocPlanCre return &AdhocPlanCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. 
For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AdhocPlanClient) MapCreateBulk(slice any, setFunc func(*AdhocPlanCreate, int)) *AdhocPlanCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AdhocPlanCreateBulk{err: fmt.Errorf("calling to AdhocPlanClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AdhocPlanCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AdhocPlanCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for AdhocPlan. func (c *AdhocPlanClient) Update() *AdhocPlanUpdate { mutation := newAdhocPlanMutation(c.config, OpUpdate) @@ -427,12 +578,12 @@ func (c *AdhocPlanClient) Delete() *AdhocPlanDelete { return &AdhocPlanDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *AdhocPlanClient) DeleteOne(ap *AdhocPlan) *AdhocPlanDeleteOne { return c.DeleteOneID(ap.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AdhocPlanClient) DeleteOneID(id uuid.UUID) *AdhocPlanDeleteOne { builder := c.Delete().Where(adhocplan.ID(id)) builder.mutation.id = &id @@ -444,6 +595,8 @@ func (c *AdhocPlanClient) DeleteOneID(id uuid.UUID) *AdhocPlanDeleteOne { func (c *AdhocPlanClient) Query() *AdhocPlanQuery { return &AdhocPlanQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAdhocPlan}, + inters: c.Interceptors(), } } @@ -463,8 +616,8 @@ func (c *AdhocPlanClient) GetX(ctx context.Context, id uuid.UUID) *AdhocPlan { // QueryPrevAdhocPlan queries the PrevAdhocPlan edge of a AdhocPlan. func (c *AdhocPlanClient) QueryPrevAdhocPlan(ap *AdhocPlan) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AdhocPlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ap.ID step := sqlgraph.NewStep( sqlgraph.From(adhocplan.Table, adhocplan.FieldID, id), @@ -479,8 +632,8 @@ func (c *AdhocPlanClient) QueryPrevAdhocPlan(ap *AdhocPlan) *AdhocPlanQuery { // QueryNextAdhocPlan queries the NextAdhocPlan edge of a AdhocPlan. func (c *AdhocPlanClient) QueryNextAdhocPlan(ap *AdhocPlan) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AdhocPlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ap.ID step := sqlgraph.NewStep( sqlgraph.From(adhocplan.Table, adhocplan.FieldID, id), @@ -495,8 +648,8 @@ func (c *AdhocPlanClient) QueryNextAdhocPlan(ap *AdhocPlan) *AdhocPlanQuery { // QueryAdhocPlanToBuild queries the AdhocPlanToBuild edge of a AdhocPlan. 
func (c *AdhocPlanClient) QueryAdhocPlanToBuild(ap *AdhocPlan) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ap.ID step := sqlgraph.NewStep( sqlgraph.From(adhocplan.Table, adhocplan.FieldID, id), @@ -511,8 +664,8 @@ func (c *AdhocPlanClient) QueryAdhocPlanToBuild(ap *AdhocPlan) *BuildQuery { // QueryAdhocPlanToStatus queries the AdhocPlanToStatus edge of a AdhocPlan. func (c *AdhocPlanClient) QueryAdhocPlanToStatus(ap *AdhocPlan) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ap.ID step := sqlgraph.NewStep( sqlgraph.From(adhocplan.Table, adhocplan.FieldID, id), @@ -527,8 +680,8 @@ func (c *AdhocPlanClient) QueryAdhocPlanToStatus(ap *AdhocPlan) *StatusQuery { // QueryAdhocPlanToAgentTask queries the AdhocPlanToAgentTask edge of a AdhocPlan. func (c *AdhocPlanClient) QueryAdhocPlanToAgentTask(ap *AdhocPlan) *AgentTaskQuery { - query := &AgentTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AgentTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ap.ID step := sqlgraph.NewStep( sqlgraph.From(adhocplan.Table, adhocplan.FieldID, id), @@ -546,6 +699,26 @@ func (c *AdhocPlanClient) Hooks() []Hook { return c.hooks.AdhocPlan } +// Interceptors returns the client interceptors. +func (c *AdhocPlanClient) Interceptors() []Interceptor { + return c.inters.AdhocPlan +} + +func (c *AdhocPlanClient) mutate(ctx context.Context, m *AdhocPlanMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AdhocPlanCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AdhocPlanUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AdhocPlanUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AdhocPlanDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AdhocPlan mutation op: %q", m.Op()) + } +} + // AgentStatusClient is a client for the AgentStatus schema. type AgentStatusClient struct { config @@ -562,7 +735,13 @@ func (c *AgentStatusClient) Use(hooks ...Hook) { c.hooks.AgentStatus = append(c.hooks.AgentStatus, hooks...) } -// Create returns a create builder for AgentStatus. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `agentstatus.Intercept(f(g(h())))`. +func (c *AgentStatusClient) Intercept(interceptors ...Interceptor) { + c.inters.AgentStatus = append(c.inters.AgentStatus, interceptors...) +} + +// Create returns a builder for creating a AgentStatus entity. 
func (c *AgentStatusClient) Create() *AgentStatusCreate { mutation := newAgentStatusMutation(c.config, OpCreate) return &AgentStatusCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -573,6 +752,21 @@ func (c *AgentStatusClient) CreateBulk(builders ...*AgentStatusCreate) *AgentSta return &AgentStatusCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AgentStatusClient) MapCreateBulk(slice any, setFunc func(*AgentStatusCreate, int)) *AgentStatusCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AgentStatusCreateBulk{err: fmt.Errorf("calling to AgentStatusClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AgentStatusCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AgentStatusCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for AgentStatus. func (c *AgentStatusClient) Update() *AgentStatusUpdate { mutation := newAgentStatusMutation(c.config, OpUpdate) @@ -597,12 +791,12 @@ func (c *AgentStatusClient) Delete() *AgentStatusDelete { return &AgentStatusDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *AgentStatusClient) DeleteOne(as *AgentStatus) *AgentStatusDeleteOne { return c.DeleteOneID(as.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AgentStatusClient) DeleteOneID(id uuid.UUID) *AgentStatusDeleteOne { builder := c.Delete().Where(agentstatus.ID(id)) builder.mutation.id = &id @@ -614,6 +808,8 @@ func (c *AgentStatusClient) DeleteOneID(id uuid.UUID) *AgentStatusDeleteOne { func (c *AgentStatusClient) Query() *AgentStatusQuery { return &AgentStatusQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAgentStatus}, + inters: c.Interceptors(), } } @@ -633,8 +829,8 @@ func (c *AgentStatusClient) GetX(ctx context.Context, id uuid.UUID) *AgentStatus // QueryAgentStatusToProvisionedHost queries the AgentStatusToProvisionedHost edge of a AgentStatus. func (c *AgentStatusClient) QueryAgentStatusToProvisionedHost(as *AgentStatus) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := as.ID step := sqlgraph.NewStep( sqlgraph.From(agentstatus.Table, agentstatus.FieldID, id), @@ -649,8 +845,8 @@ func (c *AgentStatusClient) QueryAgentStatusToProvisionedHost(as *AgentStatus) * // QueryAgentStatusToProvisionedNetwork queries the AgentStatusToProvisionedNetwork edge of a AgentStatus. 
func (c *AgentStatusClient) QueryAgentStatusToProvisionedNetwork(as *AgentStatus) *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := as.ID step := sqlgraph.NewStep( sqlgraph.From(agentstatus.Table, agentstatus.FieldID, id), @@ -665,8 +861,8 @@ func (c *AgentStatusClient) QueryAgentStatusToProvisionedNetwork(as *AgentStatus // QueryAgentStatusToBuild queries the AgentStatusToBuild edge of a AgentStatus. func (c *AgentStatusClient) QueryAgentStatusToBuild(as *AgentStatus) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := as.ID step := sqlgraph.NewStep( sqlgraph.From(agentstatus.Table, agentstatus.FieldID, id), @@ -684,6 +880,26 @@ func (c *AgentStatusClient) Hooks() []Hook { return c.hooks.AgentStatus } +// Interceptors returns the client interceptors. +func (c *AgentStatusClient) Interceptors() []Interceptor { + return c.inters.AgentStatus +} + +func (c *AgentStatusClient) mutate(ctx context.Context, m *AgentStatusMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AgentStatusCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AgentStatusUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AgentStatusUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AgentStatusDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AgentStatus mutation op: %q", m.Op()) + } +} + // AgentTaskClient is a client for the AgentTask schema. type AgentTaskClient struct { config @@ -700,7 +916,13 @@ func (c *AgentTaskClient) Use(hooks ...Hook) { c.hooks.AgentTask = append(c.hooks.AgentTask, hooks...) } -// Create returns a create builder for AgentTask. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `agenttask.Intercept(f(g(h())))`. +func (c *AgentTaskClient) Intercept(interceptors ...Interceptor) { + c.inters.AgentTask = append(c.inters.AgentTask, interceptors...) +} + +// Create returns a builder for creating a AgentTask entity. func (c *AgentTaskClient) Create() *AgentTaskCreate { mutation := newAgentTaskMutation(c.config, OpCreate) return &AgentTaskCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -711,6 +933,21 @@ func (c *AgentTaskClient) CreateBulk(builders ...*AgentTaskCreate) *AgentTaskCre return &AgentTaskCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *AgentTaskClient) MapCreateBulk(slice any, setFunc func(*AgentTaskCreate, int)) *AgentTaskCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AgentTaskCreateBulk{err: fmt.Errorf("calling to AgentTaskClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AgentTaskCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AgentTaskCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for AgentTask. func (c *AgentTaskClient) Update() *AgentTaskUpdate { mutation := newAgentTaskMutation(c.config, OpUpdate) @@ -735,12 +972,12 @@ func (c *AgentTaskClient) Delete() *AgentTaskDelete { return &AgentTaskDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *AgentTaskClient) DeleteOne(at *AgentTask) *AgentTaskDeleteOne { return c.DeleteOneID(at.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AgentTaskClient) DeleteOneID(id uuid.UUID) *AgentTaskDeleteOne { builder := c.Delete().Where(agenttask.ID(id)) builder.mutation.id = &id @@ -752,6 +989,8 @@ func (c *AgentTaskClient) DeleteOneID(id uuid.UUID) *AgentTaskDeleteOne { func (c *AgentTaskClient) Query() *AgentTaskQuery { return &AgentTaskQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAgentTask}, + inters: c.Interceptors(), } } @@ -771,8 +1010,8 @@ func (c *AgentTaskClient) GetX(ctx context.Context, id uuid.UUID) *AgentTask { // QueryAgentTaskToProvisioningStep queries the AgentTaskToProvisioningStep edge of a AgentTask. func (c *AgentTaskClient) QueryAgentTaskToProvisioningStep(at *AgentTask) *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisioningStepClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := at.ID step := sqlgraph.NewStep( sqlgraph.From(agenttask.Table, agenttask.FieldID, id), @@ -787,8 +1026,8 @@ func (c *AgentTaskClient) QueryAgentTaskToProvisioningStep(at *AgentTask) *Provi // QueryAgentTaskToProvisionedHost queries the AgentTaskToProvisionedHost edge of a AgentTask. func (c *AgentTaskClient) QueryAgentTaskToProvisionedHost(at *AgentTask) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := at.ID step := sqlgraph.NewStep( sqlgraph.From(agenttask.Table, agenttask.FieldID, id), @@ -803,8 +1042,8 @@ func (c *AgentTaskClient) QueryAgentTaskToProvisionedHost(at *AgentTask) *Provis // QueryAgentTaskToAdhocPlan queries the AgentTaskToAdhocPlan edge of a AgentTask. 
func (c *AgentTaskClient) QueryAgentTaskToAdhocPlan(at *AgentTask) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AdhocPlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := at.ID step := sqlgraph.NewStep( sqlgraph.From(agenttask.Table, agenttask.FieldID, id), @@ -822,6 +1061,26 @@ func (c *AgentTaskClient) Hooks() []Hook { return c.hooks.AgentTask } +// Interceptors returns the client interceptors. +func (c *AgentTaskClient) Interceptors() []Interceptor { + return c.inters.AgentTask +} + +func (c *AgentTaskClient) mutate(ctx context.Context, m *AgentTaskMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AgentTaskCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AgentTaskUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AgentTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AgentTaskDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AgentTask mutation op: %q", m.Op()) + } +} + // AnsibleClient is a client for the Ansible schema. type AnsibleClient struct { config @@ -838,7 +1097,13 @@ func (c *AnsibleClient) Use(hooks ...Hook) { c.hooks.Ansible = append(c.hooks.Ansible, hooks...) } -// Create returns a create builder for Ansible. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `ansible.Intercept(f(g(h())))`. +func (c *AnsibleClient) Intercept(interceptors ...Interceptor) { + c.inters.Ansible = append(c.inters.Ansible, interceptors...) +} + +// Create returns a builder for creating a Ansible entity. func (c *AnsibleClient) Create() *AnsibleCreate { mutation := newAnsibleMutation(c.config, OpCreate) return &AnsibleCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -849,6 +1114,21 @@ func (c *AnsibleClient) CreateBulk(builders ...*AnsibleCreate) *AnsibleCreateBul return &AnsibleCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AnsibleClient) MapCreateBulk(slice any, setFunc func(*AnsibleCreate, int)) *AnsibleCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AnsibleCreateBulk{err: fmt.Errorf("calling to AnsibleClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AnsibleCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AnsibleCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Ansible. func (c *AnsibleClient) Update() *AnsibleUpdate { mutation := newAnsibleMutation(c.config, OpUpdate) @@ -873,12 +1153,12 @@ func (c *AnsibleClient) Delete() *AnsibleDelete { return &AnsibleDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *AnsibleClient) DeleteOne(a *Ansible) *AnsibleDeleteOne { return c.DeleteOneID(a.ID) } -// DeleteOneID returns a delete builder for the given id. 
+// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AnsibleClient) DeleteOneID(id uuid.UUID) *AnsibleDeleteOne { builder := c.Delete().Where(ansible.ID(id)) builder.mutation.id = &id @@ -890,6 +1170,8 @@ func (c *AnsibleClient) DeleteOneID(id uuid.UUID) *AnsibleDeleteOne { func (c *AnsibleClient) Query() *AnsibleQuery { return &AnsibleQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAnsible}, + inters: c.Interceptors(), } } @@ -909,8 +1191,8 @@ func (c *AnsibleClient) GetX(ctx context.Context, id uuid.UUID) *Ansible { // QueryAnsibleToUser queries the AnsibleToUser edge of a Ansible. func (c *AnsibleClient) QueryAnsibleToUser(a *Ansible) *UserQuery { - query := &UserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(ansible.Table, ansible.FieldID, id), @@ -925,8 +1207,8 @@ func (c *AnsibleClient) QueryAnsibleToUser(a *Ansible) *UserQuery { // QueryAnsibleFromEnvironment queries the AnsibleFromEnvironment edge of a Ansible. func (c *AnsibleClient) QueryAnsibleFromEnvironment(a *Ansible) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(ansible.Table, ansible.FieldID, id), @@ -944,6 +1226,26 @@ func (c *AnsibleClient) Hooks() []Hook { return c.hooks.Ansible } +// Interceptors returns the client interceptors. +func (c *AnsibleClient) Interceptors() []Interceptor { + return c.inters.Ansible +} + +func (c *AnsibleClient) mutate(ctx context.Context, m *AnsibleMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AnsibleCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AnsibleUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AnsibleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AnsibleDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Ansible mutation op: %q", m.Op()) + } +} + // AuthUserClient is a client for the AuthUser schema. type AuthUserClient struct { config @@ -960,7 +1262,13 @@ func (c *AuthUserClient) Use(hooks ...Hook) { c.hooks.AuthUser = append(c.hooks.AuthUser, hooks...) } -// Create returns a create builder for AuthUser. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `authuser.Intercept(f(g(h())))`. +func (c *AuthUserClient) Intercept(interceptors ...Interceptor) { + c.inters.AuthUser = append(c.inters.AuthUser, interceptors...) +} + +// Create returns a builder for creating a AuthUser entity. func (c *AuthUserClient) Create() *AuthUserCreate { mutation := newAuthUserMutation(c.config, OpCreate) return &AuthUserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -971,6 +1279,21 @@ func (c *AuthUserClient) CreateBulk(builders ...*AuthUserCreate) *AuthUserCreate return &AuthUserCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. 
For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AuthUserClient) MapCreateBulk(slice any, setFunc func(*AuthUserCreate, int)) *AuthUserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AuthUserCreateBulk{err: fmt.Errorf("calling to AuthUserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AuthUserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AuthUserCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for AuthUser. func (c *AuthUserClient) Update() *AuthUserUpdate { mutation := newAuthUserMutation(c.config, OpUpdate) @@ -995,12 +1318,12 @@ func (c *AuthUserClient) Delete() *AuthUserDelete { return &AuthUserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *AuthUserClient) DeleteOne(au *AuthUser) *AuthUserDeleteOne { return c.DeleteOneID(au.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AuthUserClient) DeleteOneID(id uuid.UUID) *AuthUserDeleteOne { builder := c.Delete().Where(authuser.ID(id)) builder.mutation.id = &id @@ -1012,6 +1335,8 @@ func (c *AuthUserClient) DeleteOneID(id uuid.UUID) *AuthUserDeleteOne { func (c *AuthUserClient) Query() *AuthUserQuery { return &AuthUserQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAuthUser}, + inters: c.Interceptors(), } } @@ -1031,8 +1356,8 @@ func (c *AuthUserClient) GetX(ctx context.Context, id uuid.UUID) *AuthUser { // QueryAuthUserToToken queries the AuthUserToToken edge of a AuthUser. func (c *AuthUserClient) QueryAuthUserToToken(au *AuthUser) *TokenQuery { - query := &TokenQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TokenClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := au.ID step := sqlgraph.NewStep( sqlgraph.From(authuser.Table, authuser.FieldID, id), @@ -1047,8 +1372,8 @@ func (c *AuthUserClient) QueryAuthUserToToken(au *AuthUser) *TokenQuery { // QueryAuthUserToServerTasks queries the AuthUserToServerTasks edge of a AuthUser. func (c *AuthUserClient) QueryAuthUserToServerTasks(au *AuthUser) *ServerTaskQuery { - query := &ServerTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ServerTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := au.ID step := sqlgraph.NewStep( sqlgraph.From(authuser.Table, authuser.FieldID, id), @@ -1066,6 +1391,26 @@ func (c *AuthUserClient) Hooks() []Hook { return c.hooks.AuthUser } +// Interceptors returns the client interceptors. 
+func (c *AuthUserClient) Interceptors() []Interceptor { + return c.inters.AuthUser +} + +func (c *AuthUserClient) mutate(ctx context.Context, m *AuthUserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AuthUserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AuthUserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AuthUserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AuthUserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AuthUser mutation op: %q", m.Op()) + } +} + // BuildClient is a client for the Build schema. type BuildClient struct { config @@ -1082,7 +1427,13 @@ func (c *BuildClient) Use(hooks ...Hook) { c.hooks.Build = append(c.hooks.Build, hooks...) } -// Create returns a create builder for Build. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `build.Intercept(f(g(h())))`. +func (c *BuildClient) Intercept(interceptors ...Interceptor) { + c.inters.Build = append(c.inters.Build, interceptors...) +} + +// Create returns a builder for creating a Build entity. func (c *BuildClient) Create() *BuildCreate { mutation := newBuildMutation(c.config, OpCreate) return &BuildCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -1093,6 +1444,21 @@ func (c *BuildClient) CreateBulk(builders ...*BuildCreate) *BuildCreateBulk { return &BuildCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BuildClient) MapCreateBulk(slice any, setFunc func(*BuildCreate, int)) *BuildCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BuildCreateBulk{err: fmt.Errorf("calling to BuildClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BuildCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BuildCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Build. func (c *BuildClient) Update() *BuildUpdate { mutation := newBuildMutation(c.config, OpUpdate) @@ -1117,12 +1483,12 @@ func (c *BuildClient) Delete() *BuildDelete { return &BuildDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *BuildClient) DeleteOne(b *Build) *BuildDeleteOne { return c.DeleteOneID(b.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *BuildClient) DeleteOneID(id uuid.UUID) *BuildDeleteOne { builder := c.Delete().Where(build.ID(id)) builder.mutation.id = &id @@ -1134,6 +1500,8 @@ func (c *BuildClient) DeleteOneID(id uuid.UUID) *BuildDeleteOne { func (c *BuildClient) Query() *BuildQuery { return &BuildQuery{ config: c.config, + ctx: &QueryContext{Type: TypeBuild}, + inters: c.Interceptors(), } } @@ -1153,8 +1521,8 @@ func (c *BuildClient) GetX(ctx context.Context, id uuid.UUID) *Build { // QueryBuildToStatus queries the BuildToStatus edge of a Build. 
func (c *BuildClient) QueryBuildToStatus(b *Build) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1169,8 +1537,8 @@ func (c *BuildClient) QueryBuildToStatus(b *Build) *StatusQuery { // QueryBuildToEnvironment queries the BuildToEnvironment edge of a Build. func (c *BuildClient) QueryBuildToEnvironment(b *Build) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1185,8 +1553,8 @@ func (c *BuildClient) QueryBuildToEnvironment(b *Build) *EnvironmentQuery { // QueryBuildToCompetition queries the BuildToCompetition edge of a Build. func (c *BuildClient) QueryBuildToCompetition(b *Build) *CompetitionQuery { - query := &CompetitionQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&CompetitionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1201,8 +1569,8 @@ func (c *BuildClient) QueryBuildToCompetition(b *Build) *CompetitionQuery { // QueryBuildToLatestBuildCommit queries the BuildToLatestBuildCommit edge of a Build. func (c *BuildClient) QueryBuildToLatestBuildCommit(b *Build) *BuildCommitQuery { - query := &BuildCommitQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1217,8 +1585,8 @@ func (c *BuildClient) QueryBuildToLatestBuildCommit(b *Build) *BuildCommitQuery // QueryBuildToRepoCommit queries the BuildToRepoCommit edge of a Build. func (c *BuildClient) QueryBuildToRepoCommit(b *Build) *RepoCommitQuery { - query := &RepoCommitQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&RepoCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1233,8 +1601,8 @@ func (c *BuildClient) QueryBuildToRepoCommit(b *Build) *RepoCommitQuery { // QueryBuildToProvisionedNetwork queries the BuildToProvisionedNetwork edge of a Build. func (c *BuildClient) QueryBuildToProvisionedNetwork(b *Build) *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1249,8 +1617,8 @@ func (c *BuildClient) QueryBuildToProvisionedNetwork(b *Build) *ProvisionedNetwo // QueryBuildToTeam queries the BuildToTeam edge of a Build. 
func (c *BuildClient) QueryBuildToTeam(b *Build) *TeamQuery { - query := &TeamQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TeamClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1265,8 +1633,8 @@ func (c *BuildClient) QueryBuildToTeam(b *Build) *TeamQuery { // QueryBuildToPlan queries the BuildToPlan edge of a Build. func (c *BuildClient) QueryBuildToPlan(b *Build) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1281,8 +1649,8 @@ func (c *BuildClient) QueryBuildToPlan(b *Build) *PlanQuery { // QueryBuildToBuildCommits queries the BuildToBuildCommits edge of a Build. func (c *BuildClient) QueryBuildToBuildCommits(b *Build) *BuildCommitQuery { - query := &BuildCommitQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1297,8 +1665,8 @@ func (c *BuildClient) QueryBuildToBuildCommits(b *Build) *BuildCommitQuery { // QueryBuildToAdhocPlans queries the BuildToAdhocPlans edge of a Build. func (c *BuildClient) QueryBuildToAdhocPlans(b *Build) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AdhocPlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1313,8 +1681,8 @@ func (c *BuildClient) QueryBuildToAdhocPlans(b *Build) *AdhocPlanQuery { // QueryBuildToAgentStatuses queries the BuildToAgentStatuses edge of a Build. func (c *BuildClient) QueryBuildToAgentStatuses(b *Build) *AgentStatusQuery { - query := &AgentStatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AgentStatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1329,8 +1697,8 @@ func (c *BuildClient) QueryBuildToAgentStatuses(b *Build) *AgentStatusQuery { // QueryBuildToServerTasks queries the BuildToServerTasks edge of a Build. func (c *BuildClient) QueryBuildToServerTasks(b *Build) *ServerTaskQuery { - query := &ServerTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ServerTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := b.ID step := sqlgraph.NewStep( sqlgraph.From(build.Table, build.FieldID, id), @@ -1348,6 +1716,26 @@ func (c *BuildClient) Hooks() []Hook { return c.hooks.Build } +// Interceptors returns the client interceptors. 
+func (c *BuildClient) Interceptors() []Interceptor { + return c.inters.Build +} + +func (c *BuildClient) mutate(ctx context.Context, m *BuildMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BuildCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BuildUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BuildUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BuildDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Build mutation op: %q", m.Op()) + } +} + // BuildCommitClient is a client for the BuildCommit schema. type BuildCommitClient struct { config @@ -1364,7 +1752,13 @@ func (c *BuildCommitClient) Use(hooks ...Hook) { c.hooks.BuildCommit = append(c.hooks.BuildCommit, hooks...) } -// Create returns a create builder for BuildCommit. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `buildcommit.Intercept(f(g(h())))`. +func (c *BuildCommitClient) Intercept(interceptors ...Interceptor) { + c.inters.BuildCommit = append(c.inters.BuildCommit, interceptors...) +} + +// Create returns a builder for creating a BuildCommit entity. func (c *BuildCommitClient) Create() *BuildCommitCreate { mutation := newBuildCommitMutation(c.config, OpCreate) return &BuildCommitCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -1375,6 +1769,21 @@ func (c *BuildCommitClient) CreateBulk(builders ...*BuildCommitCreate) *BuildCom return &BuildCommitCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BuildCommitClient) MapCreateBulk(slice any, setFunc func(*BuildCommitCreate, int)) *BuildCommitCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BuildCommitCreateBulk{err: fmt.Errorf("calling to BuildCommitClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BuildCommitCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BuildCommitCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for BuildCommit. func (c *BuildCommitClient) Update() *BuildCommitUpdate { mutation := newBuildCommitMutation(c.config, OpUpdate) @@ -1399,12 +1808,12 @@ func (c *BuildCommitClient) Delete() *BuildCommitDelete { return &BuildCommitDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *BuildCommitClient) DeleteOne(bc *BuildCommit) *BuildCommitDeleteOne { return c.DeleteOneID(bc.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. 
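// --- Editor's aside (illustrative sketch, not part of the generated diff) ---
// One way the new Intercept/Interceptors hooks might be used, e.g. to log every
// Build query. ent.InterceptFunc, ent.QuerierFunc, ent.Querier, ent.Query and
// ent.Value are part of the interceptor runtime emitted by recent ent versions;
// the logging itself and logBuildQueries are assumptions made for this example.
// Assumes: import ("context"; "log"; "github.com/gen0cide/laforge/ent")

func logBuildQueries(client *ent.Client) {
	client.Build.Intercept(ent.InterceptFunc(func(next ent.Querier) ent.Querier {
		return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
			// Log the concrete query type, then continue the interceptor chain.
			log.Printf("laforge/ent: executing %T", q)
			return next.Query(ctx, q)
		})
	}))
}
// --- end aside ---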
func (c *BuildCommitClient) DeleteOneID(id uuid.UUID) *BuildCommitDeleteOne { builder := c.Delete().Where(buildcommit.ID(id)) builder.mutation.id = &id @@ -1416,6 +1825,8 @@ func (c *BuildCommitClient) DeleteOneID(id uuid.UUID) *BuildCommitDeleteOne { func (c *BuildCommitClient) Query() *BuildCommitQuery { return &BuildCommitQuery{ config: c.config, + ctx: &QueryContext{Type: TypeBuildCommit}, + inters: c.Interceptors(), } } @@ -1435,8 +1846,8 @@ func (c *BuildCommitClient) GetX(ctx context.Context, id uuid.UUID) *BuildCommit // QueryBuildCommitToBuild queries the BuildCommitToBuild edge of a BuildCommit. func (c *BuildCommitClient) QueryBuildCommitToBuild(bc *BuildCommit) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := bc.ID step := sqlgraph.NewStep( sqlgraph.From(buildcommit.Table, buildcommit.FieldID, id), @@ -1451,8 +1862,8 @@ func (c *BuildCommitClient) QueryBuildCommitToBuild(bc *BuildCommit) *BuildQuery // QueryBuildCommitToServerTask queries the BuildCommitToServerTask edge of a BuildCommit. func (c *BuildCommitClient) QueryBuildCommitToServerTask(bc *BuildCommit) *ServerTaskQuery { - query := &ServerTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ServerTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := bc.ID step := sqlgraph.NewStep( sqlgraph.From(buildcommit.Table, buildcommit.FieldID, id), @@ -1467,8 +1878,8 @@ func (c *BuildCommitClient) QueryBuildCommitToServerTask(bc *BuildCommit) *Serve // QueryBuildCommitToPlanDiffs queries the BuildCommitToPlanDiffs edge of a BuildCommit. func (c *BuildCommitClient) QueryBuildCommitToPlanDiffs(bc *BuildCommit) *PlanDiffQuery { - query := &PlanDiffQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanDiffClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := bc.ID step := sqlgraph.NewStep( sqlgraph.From(buildcommit.Table, buildcommit.FieldID, id), @@ -1486,6 +1897,26 @@ func (c *BuildCommitClient) Hooks() []Hook { return c.hooks.BuildCommit } +// Interceptors returns the client interceptors. +func (c *BuildCommitClient) Interceptors() []Interceptor { + return c.inters.BuildCommit +} + +func (c *BuildCommitClient) mutate(ctx context.Context, m *BuildCommitMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BuildCommitCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BuildCommitUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BuildCommitUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BuildCommitDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown BuildCommit mutation op: %q", m.Op()) + } +} + // CommandClient is a client for the Command schema. type CommandClient struct { config @@ -1502,7 +1933,13 @@ func (c *CommandClient) Use(hooks ...Hook) { c.hooks.Command = append(c.hooks.Command, hooks...) } -// Create returns a create builder for Command. +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `command.Intercept(f(g(h())))`. +func (c *CommandClient) Intercept(interceptors ...Interceptor) { + c.inters.Command = append(c.inters.Command, interceptors...) +} + +// Create returns a builder for creating a Command entity. func (c *CommandClient) Create() *CommandCreate { mutation := newCommandMutation(c.config, OpCreate) return &CommandCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -1513,6 +1950,21 @@ func (c *CommandClient) CreateBulk(builders ...*CommandCreate) *CommandCreateBul return &CommandCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *CommandClient) MapCreateBulk(slice any, setFunc func(*CommandCreate, int)) *CommandCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &CommandCreateBulk{err: fmt.Errorf("calling to CommandClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*CommandCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &CommandCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Command. func (c *CommandClient) Update() *CommandUpdate { mutation := newCommandMutation(c.config, OpUpdate) @@ -1537,12 +1989,12 @@ func (c *CommandClient) Delete() *CommandDelete { return &CommandDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *CommandClient) DeleteOne(co *Command) *CommandDeleteOne { return c.DeleteOneID(co.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *CommandClient) DeleteOneID(id uuid.UUID) *CommandDeleteOne { builder := c.Delete().Where(command.ID(id)) builder.mutation.id = &id @@ -1554,6 +2006,8 @@ func (c *CommandClient) DeleteOneID(id uuid.UUID) *CommandDeleteOne { func (c *CommandClient) Query() *CommandQuery { return &CommandQuery{ config: c.config, + ctx: &QueryContext{Type: TypeCommand}, + inters: c.Interceptors(), } } @@ -1573,8 +2027,8 @@ func (c *CommandClient) GetX(ctx context.Context, id uuid.UUID) *Command { // QueryCommandToUser queries the CommandToUser edge of a Command. func (c *CommandClient) QueryCommandToUser(co *Command) *UserQuery { - query := &UserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := co.ID step := sqlgraph.NewStep( sqlgraph.From(command.Table, command.FieldID, id), @@ -1589,8 +2043,8 @@ func (c *CommandClient) QueryCommandToUser(co *Command) *UserQuery { // QueryCommandToEnvironment queries the CommandToEnvironment edge of a Command. 
func (c *CommandClient) QueryCommandToEnvironment(co *Command) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := co.ID step := sqlgraph.NewStep( sqlgraph.From(command.Table, command.FieldID, id), @@ -1608,6 +2062,26 @@ func (c *CommandClient) Hooks() []Hook { return c.hooks.Command } +// Interceptors returns the client interceptors. +func (c *CommandClient) Interceptors() []Interceptor { + return c.inters.Command +} + +func (c *CommandClient) mutate(ctx context.Context, m *CommandMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&CommandCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&CommandUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&CommandUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&CommandDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Command mutation op: %q", m.Op()) + } +} + // CompetitionClient is a client for the Competition schema. type CompetitionClient struct { config @@ -1624,7 +2098,13 @@ func (c *CompetitionClient) Use(hooks ...Hook) { c.hooks.Competition = append(c.hooks.Competition, hooks...) } -// Create returns a create builder for Competition. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `competition.Intercept(f(g(h())))`. +func (c *CompetitionClient) Intercept(interceptors ...Interceptor) { + c.inters.Competition = append(c.inters.Competition, interceptors...) +} + +// Create returns a builder for creating a Competition entity. func (c *CompetitionClient) Create() *CompetitionCreate { mutation := newCompetitionMutation(c.config, OpCreate) return &CompetitionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -1635,6 +2115,21 @@ func (c *CompetitionClient) CreateBulk(builders ...*CompetitionCreate) *Competit return &CompetitionCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *CompetitionClient) MapCreateBulk(slice any, setFunc func(*CompetitionCreate, int)) *CompetitionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &CompetitionCreateBulk{err: fmt.Errorf("calling to CompetitionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*CompetitionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &CompetitionCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Competition. func (c *CompetitionClient) Update() *CompetitionUpdate { mutation := newCompetitionMutation(c.config, OpUpdate) @@ -1659,12 +2154,12 @@ func (c *CompetitionClient) Delete() *CompetitionDelete { return &CompetitionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. 
func (c *CompetitionClient) DeleteOne(co *Competition) *CompetitionDeleteOne { return c.DeleteOneID(co.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *CompetitionClient) DeleteOneID(id uuid.UUID) *CompetitionDeleteOne { builder := c.Delete().Where(competition.ID(id)) builder.mutation.id = &id @@ -1676,6 +2171,8 @@ func (c *CompetitionClient) DeleteOneID(id uuid.UUID) *CompetitionDeleteOne { func (c *CompetitionClient) Query() *CompetitionQuery { return &CompetitionQuery{ config: c.config, + ctx: &QueryContext{Type: TypeCompetition}, + inters: c.Interceptors(), } } @@ -1695,8 +2192,8 @@ func (c *CompetitionClient) GetX(ctx context.Context, id uuid.UUID) *Competition // QueryCompetitionToDNS queries the CompetitionToDNS edge of a Competition. func (c *CompetitionClient) QueryCompetitionToDNS(co *Competition) *DNSQuery { - query := &DNSQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DNSClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := co.ID step := sqlgraph.NewStep( sqlgraph.From(competition.Table, competition.FieldID, id), @@ -1711,8 +2208,8 @@ func (c *CompetitionClient) QueryCompetitionToDNS(co *Competition) *DNSQuery { // QueryCompetitionToEnvironment queries the CompetitionToEnvironment edge of a Competition. func (c *CompetitionClient) QueryCompetitionToEnvironment(co *Competition) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := co.ID step := sqlgraph.NewStep( sqlgraph.From(competition.Table, competition.FieldID, id), @@ -1727,8 +2224,8 @@ func (c *CompetitionClient) QueryCompetitionToEnvironment(co *Competition) *Envi // QueryCompetitionToBuild queries the CompetitionToBuild edge of a Competition. func (c *CompetitionClient) QueryCompetitionToBuild(co *Competition) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := co.ID step := sqlgraph.NewStep( sqlgraph.From(competition.Table, competition.FieldID, id), @@ -1746,6 +2243,26 @@ func (c *CompetitionClient) Hooks() []Hook { return c.hooks.Competition } +// Interceptors returns the client interceptors. +func (c *CompetitionClient) Interceptors() []Interceptor { + return c.inters.Competition +} + +func (c *CompetitionClient) mutate(ctx context.Context, m *CompetitionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&CompetitionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&CompetitionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&CompetitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&CompetitionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Competition mutation op: %q", m.Op()) + } +} + // DNSClient is a client for the DNS schema. 
type DNSClient struct { config @@ -1762,7 +2279,13 @@ func (c *DNSClient) Use(hooks ...Hook) { c.hooks.DNS = append(c.hooks.DNS, hooks...) } -// Create returns a create builder for DNS. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `dns.Intercept(f(g(h())))`. +func (c *DNSClient) Intercept(interceptors ...Interceptor) { + c.inters.DNS = append(c.inters.DNS, interceptors...) +} + +// Create returns a builder for creating a DNS entity. func (c *DNSClient) Create() *DNSCreate { mutation := newDNSMutation(c.config, OpCreate) return &DNSCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -1773,6 +2296,21 @@ func (c *DNSClient) CreateBulk(builders ...*DNSCreate) *DNSCreateBulk { return &DNSCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *DNSClient) MapCreateBulk(slice any, setFunc func(*DNSCreate, int)) *DNSCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DNSCreateBulk{err: fmt.Errorf("calling to DNSClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DNSCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DNSCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for DNS. func (c *DNSClient) Update() *DNSUpdate { mutation := newDNSMutation(c.config, OpUpdate) @@ -1797,12 +2335,12 @@ func (c *DNSClient) Delete() *DNSDelete { return &DNSDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *DNSClient) DeleteOne(d *DNS) *DNSDeleteOne { return c.DeleteOneID(d.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *DNSClient) DeleteOneID(id uuid.UUID) *DNSDeleteOne { builder := c.Delete().Where(dns.ID(id)) builder.mutation.id = &id @@ -1814,6 +2352,8 @@ func (c *DNSClient) DeleteOneID(id uuid.UUID) *DNSDeleteOne { func (c *DNSClient) Query() *DNSQuery { return &DNSQuery{ config: c.config, + ctx: &QueryContext{Type: TypeDNS}, + inters: c.Interceptors(), } } @@ -1833,8 +2373,8 @@ func (c *DNSClient) GetX(ctx context.Context, id uuid.UUID) *DNS { // QueryDNSToEnvironment queries the DNSToEnvironment edge of a DNS. func (c *DNSClient) QueryDNSToEnvironment(d *DNS) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := d.ID step := sqlgraph.NewStep( sqlgraph.From(dns.Table, dns.FieldID, id), @@ -1849,8 +2389,8 @@ func (c *DNSClient) QueryDNSToEnvironment(d *DNS) *EnvironmentQuery { // QueryDNSToCompetition queries the DNSToCompetition edge of a DNS. 
func (c *DNSClient) QueryDNSToCompetition(d *DNS) *CompetitionQuery { - query := &CompetitionQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&CompetitionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := d.ID step := sqlgraph.NewStep( sqlgraph.From(dns.Table, dns.FieldID, id), @@ -1868,6 +2408,26 @@ func (c *DNSClient) Hooks() []Hook { return c.hooks.DNS } +// Interceptors returns the client interceptors. +func (c *DNSClient) Interceptors() []Interceptor { + return c.inters.DNS +} + +func (c *DNSClient) mutate(ctx context.Context, m *DNSMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DNSCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DNSUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DNSUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DNSDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown DNS mutation op: %q", m.Op()) + } +} + // DNSRecordClient is a client for the DNSRecord schema. type DNSRecordClient struct { config @@ -1884,7 +2444,13 @@ func (c *DNSRecordClient) Use(hooks ...Hook) { c.hooks.DNSRecord = append(c.hooks.DNSRecord, hooks...) } -// Create returns a create builder for DNSRecord. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `dnsrecord.Intercept(f(g(h())))`. +func (c *DNSRecordClient) Intercept(interceptors ...Interceptor) { + c.inters.DNSRecord = append(c.inters.DNSRecord, interceptors...) +} + +// Create returns a builder for creating a DNSRecord entity. func (c *DNSRecordClient) Create() *DNSRecordCreate { mutation := newDNSRecordMutation(c.config, OpCreate) return &DNSRecordCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -1895,6 +2461,21 @@ func (c *DNSRecordClient) CreateBulk(builders ...*DNSRecordCreate) *DNSRecordCre return &DNSRecordCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *DNSRecordClient) MapCreateBulk(slice any, setFunc func(*DNSRecordCreate, int)) *DNSRecordCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DNSRecordCreateBulk{err: fmt.Errorf("calling to DNSRecordClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DNSRecordCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DNSRecordCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for DNSRecord. func (c *DNSRecordClient) Update() *DNSRecordUpdate { mutation := newDNSRecordMutation(c.config, OpUpdate) @@ -1919,12 +2500,12 @@ func (c *DNSRecordClient) Delete() *DNSRecordDelete { return &DNSRecordDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *DNSRecordClient) DeleteOne(dr *DNSRecord) *DNSRecordDeleteOne { return c.DeleteOneID(dr.ID) } -// DeleteOneID returns a delete builder for the given id. 
+// DeleteOneID returns a builder for deleting the given entity by its id. func (c *DNSRecordClient) DeleteOneID(id uuid.UUID) *DNSRecordDeleteOne { builder := c.Delete().Where(dnsrecord.ID(id)) builder.mutation.id = &id @@ -1936,6 +2517,8 @@ func (c *DNSRecordClient) DeleteOneID(id uuid.UUID) *DNSRecordDeleteOne { func (c *DNSRecordClient) Query() *DNSRecordQuery { return &DNSRecordQuery{ config: c.config, + ctx: &QueryContext{Type: TypeDNSRecord}, + inters: c.Interceptors(), } } @@ -1955,8 +2538,8 @@ func (c *DNSRecordClient) GetX(ctx context.Context, id uuid.UUID) *DNSRecord { // QueryDNSRecordToEnvironment queries the DNSRecordToEnvironment edge of a DNSRecord. func (c *DNSRecordClient) QueryDNSRecordToEnvironment(dr *DNSRecord) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := dr.ID step := sqlgraph.NewStep( sqlgraph.From(dnsrecord.Table, dnsrecord.FieldID, id), @@ -1974,6 +2557,26 @@ func (c *DNSRecordClient) Hooks() []Hook { return c.hooks.DNSRecord } +// Interceptors returns the client interceptors. +func (c *DNSRecordClient) Interceptors() []Interceptor { + return c.inters.DNSRecord +} + +func (c *DNSRecordClient) mutate(ctx context.Context, m *DNSRecordMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DNSRecordCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DNSRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DNSRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DNSRecordDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown DNSRecord mutation op: %q", m.Op()) + } +} + // DiskClient is a client for the Disk schema. type DiskClient struct { config @@ -1990,7 +2593,13 @@ func (c *DiskClient) Use(hooks ...Hook) { c.hooks.Disk = append(c.hooks.Disk, hooks...) } -// Create returns a create builder for Disk. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `disk.Intercept(f(g(h())))`. +func (c *DiskClient) Intercept(interceptors ...Interceptor) { + c.inters.Disk = append(c.inters.Disk, interceptors...) +} + +// Create returns a builder for creating a Disk entity. func (c *DiskClient) Create() *DiskCreate { mutation := newDiskMutation(c.config, OpCreate) return &DiskCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2001,6 +2610,21 @@ func (c *DiskClient) CreateBulk(builders ...*DiskCreate) *DiskCreateBulk { return &DiskCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *DiskClient) MapCreateBulk(slice any, setFunc func(*DiskCreate, int)) *DiskCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DiskCreateBulk{err: fmt.Errorf("calling to DiskClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DiskCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DiskCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Disk. func (c *DiskClient) Update() *DiskUpdate { mutation := newDiskMutation(c.config, OpUpdate) @@ -2025,12 +2649,12 @@ func (c *DiskClient) Delete() *DiskDelete { return &DiskDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *DiskClient) DeleteOne(d *Disk) *DiskDeleteOne { return c.DeleteOneID(d.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *DiskClient) DeleteOneID(id uuid.UUID) *DiskDeleteOne { builder := c.Delete().Where(disk.ID(id)) builder.mutation.id = &id @@ -2042,6 +2666,8 @@ func (c *DiskClient) DeleteOneID(id uuid.UUID) *DiskDeleteOne { func (c *DiskClient) Query() *DiskQuery { return &DiskQuery{ config: c.config, + ctx: &QueryContext{Type: TypeDisk}, + inters: c.Interceptors(), } } @@ -2061,8 +2687,8 @@ func (c *DiskClient) GetX(ctx context.Context, id uuid.UUID) *Disk { // QueryDiskToHost queries the DiskToHost edge of a Disk. func (c *DiskClient) QueryDiskToHost(d *Disk) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := d.ID step := sqlgraph.NewStep( sqlgraph.From(disk.Table, disk.FieldID, id), @@ -2080,6 +2706,26 @@ func (c *DiskClient) Hooks() []Hook { return c.hooks.Disk } +// Interceptors returns the client interceptors. +func (c *DiskClient) Interceptors() []Interceptor { + return c.inters.Disk +} + +func (c *DiskClient) mutate(ctx context.Context, m *DiskMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DiskCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DiskUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DiskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DiskDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Disk mutation op: %q", m.Op()) + } +} + // EnvironmentClient is a client for the Environment schema. type EnvironmentClient struct { config @@ -2096,7 +2742,13 @@ func (c *EnvironmentClient) Use(hooks ...Hook) { c.hooks.Environment = append(c.hooks.Environment, hooks...) } -// Create returns a create builder for Environment. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `environment.Intercept(f(g(h())))`. +func (c *EnvironmentClient) Intercept(interceptors ...Interceptor) { + c.inters.Environment = append(c.inters.Environment, interceptors...) +} + +// Create returns a builder for creating a Environment entity. 
func (c *EnvironmentClient) Create() *EnvironmentCreate { mutation := newEnvironmentMutation(c.config, OpCreate) return &EnvironmentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2107,6 +2759,21 @@ func (c *EnvironmentClient) CreateBulk(builders ...*EnvironmentCreate) *Environm return &EnvironmentCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *EnvironmentClient) MapCreateBulk(slice any, setFunc func(*EnvironmentCreate, int)) *EnvironmentCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &EnvironmentCreateBulk{err: fmt.Errorf("calling to EnvironmentClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*EnvironmentCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &EnvironmentCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Environment. func (c *EnvironmentClient) Update() *EnvironmentUpdate { mutation := newEnvironmentMutation(c.config, OpUpdate) @@ -2131,12 +2798,12 @@ func (c *EnvironmentClient) Delete() *EnvironmentDelete { return &EnvironmentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *EnvironmentClient) DeleteOne(e *Environment) *EnvironmentDeleteOne { return c.DeleteOneID(e.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *EnvironmentClient) DeleteOneID(id uuid.UUID) *EnvironmentDeleteOne { builder := c.Delete().Where(environment.ID(id)) builder.mutation.id = &id @@ -2148,6 +2815,8 @@ func (c *EnvironmentClient) DeleteOneID(id uuid.UUID) *EnvironmentDeleteOne { func (c *EnvironmentClient) Query() *EnvironmentQuery { return &EnvironmentQuery{ config: c.config, + ctx: &QueryContext{Type: TypeEnvironment}, + inters: c.Interceptors(), } } @@ -2167,8 +2836,8 @@ func (c *EnvironmentClient) GetX(ctx context.Context, id uuid.UUID) *Environment // QueryEnvironmentToUser queries the EnvironmentToUser edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToUser(e *Environment) *UserQuery { - query := &UserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2183,8 +2852,8 @@ func (c *EnvironmentClient) QueryEnvironmentToUser(e *Environment) *UserQuery { // QueryEnvironmentToHost queries the EnvironmentToHost edge of a Environment. 
func (c *EnvironmentClient) QueryEnvironmentToHost(e *Environment) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2199,8 +2868,8 @@ func (c *EnvironmentClient) QueryEnvironmentToHost(e *Environment) *HostQuery { // QueryEnvironmentToCompetition queries the EnvironmentToCompetition edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToCompetition(e *Environment) *CompetitionQuery { - query := &CompetitionQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&CompetitionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2215,8 +2884,8 @@ func (c *EnvironmentClient) QueryEnvironmentToCompetition(e *Environment) *Compe // QueryEnvironmentToIdentity queries the EnvironmentToIdentity edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToIdentity(e *Environment) *IdentityQuery { - query := &IdentityQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&IdentityClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2231,8 +2900,8 @@ func (c *EnvironmentClient) QueryEnvironmentToIdentity(e *Environment) *Identity // QueryEnvironmentToCommand queries the EnvironmentToCommand edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToCommand(e *Environment) *CommandQuery { - query := &CommandQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&CommandClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2247,8 +2916,8 @@ func (c *EnvironmentClient) QueryEnvironmentToCommand(e *Environment) *CommandQu // QueryEnvironmentToScript queries the EnvironmentToScript edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToScript(e *Environment) *ScriptQuery { - query := &ScriptQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ScriptClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2263,8 +2932,8 @@ func (c *EnvironmentClient) QueryEnvironmentToScript(e *Environment) *ScriptQuer // QueryEnvironmentToFileDownload queries the EnvironmentToFileDownload edge of a Environment. 
func (c *EnvironmentClient) QueryEnvironmentToFileDownload(e *Environment) *FileDownloadQuery { - query := &FileDownloadQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FileDownloadClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2279,8 +2948,8 @@ func (c *EnvironmentClient) QueryEnvironmentToFileDownload(e *Environment) *File // QueryEnvironmentToFileDelete queries the EnvironmentToFileDelete edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToFileDelete(e *Environment) *FileDeleteQuery { - query := &FileDeleteQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FileDeleteClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2295,8 +2964,8 @@ func (c *EnvironmentClient) QueryEnvironmentToFileDelete(e *Environment) *FileDe // QueryEnvironmentToFileExtract queries the EnvironmentToFileExtract edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToFileExtract(e *Environment) *FileExtractQuery { - query := &FileExtractQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FileExtractClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2311,8 +2980,8 @@ func (c *EnvironmentClient) QueryEnvironmentToFileExtract(e *Environment) *FileE // QueryEnvironmentToIncludedNetwork queries the EnvironmentToIncludedNetwork edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToIncludedNetwork(e *Environment) *IncludedNetworkQuery { - query := &IncludedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&IncludedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2327,8 +2996,8 @@ func (c *EnvironmentClient) QueryEnvironmentToIncludedNetwork(e *Environment) *I // QueryEnvironmentToFinding queries the EnvironmentToFinding edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToFinding(e *Environment) *FindingQuery { - query := &FindingQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FindingClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2343,8 +3012,8 @@ func (c *EnvironmentClient) QueryEnvironmentToFinding(e *Environment) *FindingQu // QueryEnvironmentToDNSRecord queries the EnvironmentToDNSRecord edge of a Environment. 
func (c *EnvironmentClient) QueryEnvironmentToDNSRecord(e *Environment) *DNSRecordQuery { - query := &DNSRecordQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DNSRecordClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2359,8 +3028,8 @@ func (c *EnvironmentClient) QueryEnvironmentToDNSRecord(e *Environment) *DNSReco // QueryEnvironmentToDNS queries the EnvironmentToDNS edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToDNS(e *Environment) *DNSQuery { - query := &DNSQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DNSClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2375,8 +3044,8 @@ func (c *EnvironmentClient) QueryEnvironmentToDNS(e *Environment) *DNSQuery { // QueryEnvironmentToNetwork queries the EnvironmentToNetwork edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToNetwork(e *Environment) *NetworkQuery { - query := &NetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&NetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2391,8 +3060,8 @@ func (c *EnvironmentClient) QueryEnvironmentToNetwork(e *Environment) *NetworkQu // QueryEnvironmentToHostDependency queries the EnvironmentToHostDependency edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToHostDependency(e *Environment) *HostDependencyQuery { - query := &HostDependencyQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostDependencyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2407,8 +3076,8 @@ func (c *EnvironmentClient) QueryEnvironmentToHostDependency(e *Environment) *Ho // QueryEnvironmentToAnsible queries the EnvironmentToAnsible edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToAnsible(e *Environment) *AnsibleQuery { - query := &AnsibleQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AnsibleClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2423,8 +3092,8 @@ func (c *EnvironmentClient) QueryEnvironmentToAnsible(e *Environment) *AnsibleQu // QueryEnvironmentToBuild queries the EnvironmentToBuild edge of a Environment. 
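// --- Editor's aside (illustrative sketch, not part of the generated diff) ---
// The edge helpers in this hunk now build their queries through the typed client
// (e.g. (&BuildClient{config: c.config}).Query()), so the QueryContext and any
// registered interceptors are attached to edge traversals as well. Callers keep
// chaining them exactly as before; a minimal sketch, with env assumed to be a
// previously loaded *ent.Environment.
// Assumes: import ("context"; "github.com/gen0cide/laforge/ent")

func buildsForEnvironment(ctx context.Context, client *ent.Client, env *ent.Environment) ([]*ent.Build, error) {
	// Traverse the EnvironmentToBuild edge and materialize all matching builds.
	return client.Environment.QueryEnvironmentToBuild(env).All(ctx)
}
// --- end aside ---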
func (c *EnvironmentClient) QueryEnvironmentToBuild(e *Environment) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2439,8 +3108,8 @@ func (c *EnvironmentClient) QueryEnvironmentToBuild(e *Environment) *BuildQuery // QueryEnvironmentToRepository queries the EnvironmentToRepository edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToRepository(e *Environment) *RepositoryQuery { - query := &RepositoryQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&RepositoryClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2455,8 +3124,8 @@ func (c *EnvironmentClient) QueryEnvironmentToRepository(e *Environment) *Reposi // QueryEnvironmentToServerTask queries the EnvironmentToServerTask edge of a Environment. func (c *EnvironmentClient) QueryEnvironmentToServerTask(e *Environment) *ServerTaskQuery { - query := &ServerTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ServerTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(environment.Table, environment.FieldID, id), @@ -2474,6 +3143,26 @@ func (c *EnvironmentClient) Hooks() []Hook { return c.hooks.Environment } +// Interceptors returns the client interceptors. +func (c *EnvironmentClient) Interceptors() []Interceptor { + return c.inters.Environment +} + +func (c *EnvironmentClient) mutate(ctx context.Context, m *EnvironmentMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&EnvironmentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&EnvironmentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&EnvironmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&EnvironmentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Environment mutation op: %q", m.Op()) + } +} + // FileDeleteClient is a client for the FileDelete schema. type FileDeleteClient struct { config @@ -2490,7 +3179,13 @@ func (c *FileDeleteClient) Use(hooks ...Hook) { c.hooks.FileDelete = append(c.hooks.FileDelete, hooks...) } -// Create returns a create builder for FileDelete. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `filedelete.Intercept(f(g(h())))`. +func (c *FileDeleteClient) Intercept(interceptors ...Interceptor) { + c.inters.FileDelete = append(c.inters.FileDelete, interceptors...) +} + +// Create returns a builder for creating a FileDelete entity. 
func (c *FileDeleteClient) Create() *FileDeleteCreate { mutation := newFileDeleteMutation(c.config, OpCreate) return &FileDeleteCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2501,6 +3196,21 @@ func (c *FileDeleteClient) CreateBulk(builders ...*FileDeleteCreate) *FileDelete return &FileDeleteCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *FileDeleteClient) MapCreateBulk(slice any, setFunc func(*FileDeleteCreate, int)) *FileDeleteCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &FileDeleteCreateBulk{err: fmt.Errorf("calling to FileDeleteClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*FileDeleteCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &FileDeleteCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for FileDelete. func (c *FileDeleteClient) Update() *FileDeleteUpdate { mutation := newFileDeleteMutation(c.config, OpUpdate) @@ -2525,12 +3235,12 @@ func (c *FileDeleteClient) Delete() *FileDeleteDelete { return &FileDeleteDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *FileDeleteClient) DeleteOne(fd *FileDelete) *FileDeleteDeleteOne { return c.DeleteOneID(fd.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *FileDeleteClient) DeleteOneID(id uuid.UUID) *FileDeleteDeleteOne { builder := c.Delete().Where(filedelete.ID(id)) builder.mutation.id = &id @@ -2542,6 +3252,8 @@ func (c *FileDeleteClient) DeleteOneID(id uuid.UUID) *FileDeleteDeleteOne { func (c *FileDeleteClient) Query() *FileDeleteQuery { return &FileDeleteQuery{ config: c.config, + ctx: &QueryContext{Type: TypeFileDelete}, + inters: c.Interceptors(), } } @@ -2561,8 +3273,8 @@ func (c *FileDeleteClient) GetX(ctx context.Context, id uuid.UUID) *FileDelete { // QueryFileDeleteToEnvironment queries the FileDeleteToEnvironment edge of a FileDelete. func (c *FileDeleteClient) QueryFileDeleteToEnvironment(fd *FileDelete) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := fd.ID step := sqlgraph.NewStep( sqlgraph.From(filedelete.Table, filedelete.FieldID, id), @@ -2580,6 +3292,26 @@ func (c *FileDeleteClient) Hooks() []Hook { return c.hooks.FileDelete } +// Interceptors returns the client interceptors. 
+func (c *FileDeleteClient) Interceptors() []Interceptor { + return c.inters.FileDelete +} + +func (c *FileDeleteClient) mutate(ctx context.Context, m *FileDeleteMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&FileDeleteCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&FileDeleteUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&FileDeleteUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&FileDeleteDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown FileDelete mutation op: %q", m.Op()) + } +} + // FileDownloadClient is a client for the FileDownload schema. type FileDownloadClient struct { config @@ -2596,7 +3328,13 @@ func (c *FileDownloadClient) Use(hooks ...Hook) { c.hooks.FileDownload = append(c.hooks.FileDownload, hooks...) } -// Create returns a create builder for FileDownload. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `filedownload.Intercept(f(g(h())))`. +func (c *FileDownloadClient) Intercept(interceptors ...Interceptor) { + c.inters.FileDownload = append(c.inters.FileDownload, interceptors...) +} + +// Create returns a builder for creating a FileDownload entity. func (c *FileDownloadClient) Create() *FileDownloadCreate { mutation := newFileDownloadMutation(c.config, OpCreate) return &FileDownloadCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2607,6 +3345,21 @@ func (c *FileDownloadClient) CreateBulk(builders ...*FileDownloadCreate) *FileDo return &FileDownloadCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *FileDownloadClient) MapCreateBulk(slice any, setFunc func(*FileDownloadCreate, int)) *FileDownloadCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &FileDownloadCreateBulk{err: fmt.Errorf("calling to FileDownloadClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*FileDownloadCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &FileDownloadCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for FileDownload. func (c *FileDownloadClient) Update() *FileDownloadUpdate { mutation := newFileDownloadMutation(c.config, OpUpdate) @@ -2631,12 +3384,12 @@ func (c *FileDownloadClient) Delete() *FileDownloadDelete { return &FileDownloadDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *FileDownloadClient) DeleteOne(fd *FileDownload) *FileDownloadDeleteOne { return c.DeleteOneID(fd.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. 
func (c *FileDownloadClient) DeleteOneID(id uuid.UUID) *FileDownloadDeleteOne { builder := c.Delete().Where(filedownload.ID(id)) builder.mutation.id = &id @@ -2648,6 +3401,8 @@ func (c *FileDownloadClient) DeleteOneID(id uuid.UUID) *FileDownloadDeleteOne { func (c *FileDownloadClient) Query() *FileDownloadQuery { return &FileDownloadQuery{ config: c.config, + ctx: &QueryContext{Type: TypeFileDownload}, + inters: c.Interceptors(), } } @@ -2667,8 +3422,8 @@ func (c *FileDownloadClient) GetX(ctx context.Context, id uuid.UUID) *FileDownlo // QueryFileDownloadToEnvironment queries the FileDownloadToEnvironment edge of a FileDownload. func (c *FileDownloadClient) QueryFileDownloadToEnvironment(fd *FileDownload) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := fd.ID step := sqlgraph.NewStep( sqlgraph.From(filedownload.Table, filedownload.FieldID, id), @@ -2686,6 +3441,26 @@ func (c *FileDownloadClient) Hooks() []Hook { return c.hooks.FileDownload } +// Interceptors returns the client interceptors. +func (c *FileDownloadClient) Interceptors() []Interceptor { + return c.inters.FileDownload +} + +func (c *FileDownloadClient) mutate(ctx context.Context, m *FileDownloadMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&FileDownloadCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&FileDownloadUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&FileDownloadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&FileDownloadDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown FileDownload mutation op: %q", m.Op()) + } +} + // FileExtractClient is a client for the FileExtract schema. type FileExtractClient struct { config @@ -2702,7 +3477,13 @@ func (c *FileExtractClient) Use(hooks ...Hook) { c.hooks.FileExtract = append(c.hooks.FileExtract, hooks...) } -// Create returns a create builder for FileExtract. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `fileextract.Intercept(f(g(h())))`. +func (c *FileExtractClient) Intercept(interceptors ...Interceptor) { + c.inters.FileExtract = append(c.inters.FileExtract, interceptors...) +} + +// Create returns a builder for creating a FileExtract entity. func (c *FileExtractClient) Create() *FileExtractCreate { mutation := newFileExtractMutation(c.config, OpCreate) return &FileExtractCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2713,6 +3494,21 @@ func (c *FileExtractClient) CreateBulk(builders ...*FileExtractCreate) *FileExtr return &FileExtractCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *FileExtractClient) MapCreateBulk(slice any, setFunc func(*FileExtractCreate, int)) *FileExtractCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &FileExtractCreateBulk{err: fmt.Errorf("calling to FileExtractClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*FileExtractCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &FileExtractCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for FileExtract. func (c *FileExtractClient) Update() *FileExtractUpdate { mutation := newFileExtractMutation(c.config, OpUpdate) @@ -2737,12 +3533,12 @@ func (c *FileExtractClient) Delete() *FileExtractDelete { return &FileExtractDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *FileExtractClient) DeleteOne(fe *FileExtract) *FileExtractDeleteOne { return c.DeleteOneID(fe.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *FileExtractClient) DeleteOneID(id uuid.UUID) *FileExtractDeleteOne { builder := c.Delete().Where(fileextract.ID(id)) builder.mutation.id = &id @@ -2754,6 +3550,8 @@ func (c *FileExtractClient) DeleteOneID(id uuid.UUID) *FileExtractDeleteOne { func (c *FileExtractClient) Query() *FileExtractQuery { return &FileExtractQuery{ config: c.config, + ctx: &QueryContext{Type: TypeFileExtract}, + inters: c.Interceptors(), } } @@ -2773,8 +3571,8 @@ func (c *FileExtractClient) GetX(ctx context.Context, id uuid.UUID) *FileExtract // QueryFileExtractToEnvironment queries the FileExtractToEnvironment edge of a FileExtract. func (c *FileExtractClient) QueryFileExtractToEnvironment(fe *FileExtract) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := fe.ID step := sqlgraph.NewStep( sqlgraph.From(fileextract.Table, fileextract.FieldID, id), @@ -2792,6 +3590,26 @@ func (c *FileExtractClient) Hooks() []Hook { return c.hooks.FileExtract } +// Interceptors returns the client interceptors. +func (c *FileExtractClient) Interceptors() []Interceptor { + return c.inters.FileExtract +} + +func (c *FileExtractClient) mutate(ctx context.Context, m *FileExtractMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&FileExtractCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&FileExtractUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&FileExtractUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&FileExtractDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown FileExtract mutation op: %q", m.Op()) + } +} + // FindingClient is a client for the Finding schema. type FindingClient struct { config @@ -2808,7 +3626,13 @@ func (c *FindingClient) Use(hooks ...Hook) { c.hooks.Finding = append(c.hooks.Finding, hooks...) } -// Create returns a create builder for Finding. +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `finding.Intercept(f(g(h())))`. +func (c *FindingClient) Intercept(interceptors ...Interceptor) { + c.inters.Finding = append(c.inters.Finding, interceptors...) +} + +// Create returns a builder for creating a Finding entity. func (c *FindingClient) Create() *FindingCreate { mutation := newFindingMutation(c.config, OpCreate) return &FindingCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2819,6 +3643,21 @@ func (c *FindingClient) CreateBulk(builders ...*FindingCreate) *FindingCreateBul return &FindingCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *FindingClient) MapCreateBulk(slice any, setFunc func(*FindingCreate, int)) *FindingCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &FindingCreateBulk{err: fmt.Errorf("calling to FindingClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*FindingCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &FindingCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Finding. func (c *FindingClient) Update() *FindingUpdate { mutation := newFindingMutation(c.config, OpUpdate) @@ -2843,12 +3682,12 @@ func (c *FindingClient) Delete() *FindingDelete { return &FindingDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *FindingClient) DeleteOne(f *Finding) *FindingDeleteOne { return c.DeleteOneID(f.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *FindingClient) DeleteOneID(id uuid.UUID) *FindingDeleteOne { builder := c.Delete().Where(finding.ID(id)) builder.mutation.id = &id @@ -2860,6 +3699,8 @@ func (c *FindingClient) DeleteOneID(id uuid.UUID) *FindingDeleteOne { func (c *FindingClient) Query() *FindingQuery { return &FindingQuery{ config: c.config, + ctx: &QueryContext{Type: TypeFinding}, + inters: c.Interceptors(), } } @@ -2879,8 +3720,8 @@ func (c *FindingClient) GetX(ctx context.Context, id uuid.UUID) *Finding { // QueryFindingToUser queries the FindingToUser edge of a Finding. func (c *FindingClient) QueryFindingToUser(f *Finding) *UserQuery { - query := &UserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := f.ID step := sqlgraph.NewStep( sqlgraph.From(finding.Table, finding.FieldID, id), @@ -2895,8 +3736,8 @@ func (c *FindingClient) QueryFindingToUser(f *Finding) *UserQuery { // QueryFindingToHost queries the FindingToHost edge of a Finding. 
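For context on the new Intercept/Interceptors hooks added in the hunks above, a minimal usage sketch follows. It assumes the regenerated package still exposes the standard ent aliases (InterceptFunc, Querier, QuerierFunc, Query, Value) and that client is an initialized *ent.Client; it is illustration, not generated output.

package examples

import (
	"context"
	"log"

	"github.com/gen0cide/laforge/ent"
)

// registerFindingLogger sketches the new type-level Intercept hook: the
// interceptor wraps every Finding query executed through this client.
// InterceptFunc/Querier/QuerierFunc/Query/Value are assumed to be the usual
// aliases generated in ent/ent.go.
func registerFindingLogger(client *ent.Client) {
	client.Finding.Intercept(
		ent.InterceptFunc(func(next ent.Querier) ent.Querier {
			return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
				log.Printf("Finding query: %T", q) // runs before every Finding query
				return next.Query(ctx, q)
			})
		}),
	)
}
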
func (c *FindingClient) QueryFindingToHost(f *Finding) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := f.ID step := sqlgraph.NewStep( sqlgraph.From(finding.Table, finding.FieldID, id), @@ -2911,8 +3752,8 @@ func (c *FindingClient) QueryFindingToHost(f *Finding) *HostQuery { // QueryFindingToScript queries the FindingToScript edge of a Finding. func (c *FindingClient) QueryFindingToScript(f *Finding) *ScriptQuery { - query := &ScriptQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ScriptClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := f.ID step := sqlgraph.NewStep( sqlgraph.From(finding.Table, finding.FieldID, id), @@ -2927,8 +3768,8 @@ func (c *FindingClient) QueryFindingToScript(f *Finding) *ScriptQuery { // QueryFindingToEnvironment queries the FindingToEnvironment edge of a Finding. func (c *FindingClient) QueryFindingToEnvironment(f *Finding) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := f.ID step := sqlgraph.NewStep( sqlgraph.From(finding.Table, finding.FieldID, id), @@ -2946,6 +3787,26 @@ func (c *FindingClient) Hooks() []Hook { return c.hooks.Finding } +// Interceptors returns the client interceptors. +func (c *FindingClient) Interceptors() []Interceptor { + return c.inters.Finding +} + +func (c *FindingClient) mutate(ctx context.Context, m *FindingMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&FindingCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&FindingUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&FindingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&FindingDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Finding mutation op: %q", m.Op()) + } +} + // GinFileMiddlewareClient is a client for the GinFileMiddleware schema. type GinFileMiddlewareClient struct { config @@ -2962,7 +3823,13 @@ func (c *GinFileMiddlewareClient) Use(hooks ...Hook) { c.hooks.GinFileMiddleware = append(c.hooks.GinFileMiddleware, hooks...) } -// Create returns a create builder for GinFileMiddleware. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `ginfilemiddleware.Intercept(f(g(h())))`. +func (c *GinFileMiddlewareClient) Intercept(interceptors ...Interceptor) { + c.inters.GinFileMiddleware = append(c.inters.GinFileMiddleware, interceptors...) +} + +// Create returns a builder for creating a GinFileMiddleware entity. 
func (c *GinFileMiddlewareClient) Create() *GinFileMiddlewareCreate { mutation := newGinFileMiddlewareMutation(c.config, OpCreate) return &GinFileMiddlewareCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -2973,6 +3840,21 @@ func (c *GinFileMiddlewareClient) CreateBulk(builders ...*GinFileMiddlewareCreat return &GinFileMiddlewareCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GinFileMiddlewareClient) MapCreateBulk(slice any, setFunc func(*GinFileMiddlewareCreate, int)) *GinFileMiddlewareCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GinFileMiddlewareCreateBulk{err: fmt.Errorf("calling to GinFileMiddlewareClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GinFileMiddlewareCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GinFileMiddlewareCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for GinFileMiddleware. func (c *GinFileMiddlewareClient) Update() *GinFileMiddlewareUpdate { mutation := newGinFileMiddlewareMutation(c.config, OpUpdate) @@ -2997,12 +3879,12 @@ func (c *GinFileMiddlewareClient) Delete() *GinFileMiddlewareDelete { return &GinFileMiddlewareDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *GinFileMiddlewareClient) DeleteOne(gfm *GinFileMiddleware) *GinFileMiddlewareDeleteOne { return c.DeleteOneID(gfm.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *GinFileMiddlewareClient) DeleteOneID(id uuid.UUID) *GinFileMiddlewareDeleteOne { builder := c.Delete().Where(ginfilemiddleware.ID(id)) builder.mutation.id = &id @@ -3014,6 +3896,8 @@ func (c *GinFileMiddlewareClient) DeleteOneID(id uuid.UUID) *GinFileMiddlewareDe func (c *GinFileMiddlewareClient) Query() *GinFileMiddlewareQuery { return &GinFileMiddlewareQuery{ config: c.config, + ctx: &QueryContext{Type: TypeGinFileMiddleware}, + inters: c.Interceptors(), } } @@ -3033,8 +3917,8 @@ func (c *GinFileMiddlewareClient) GetX(ctx context.Context, id uuid.UUID) *GinFi // QueryGinFileMiddlewareToProvisionedHost queries the GinFileMiddlewareToProvisionedHost edge of a GinFileMiddleware. func (c *GinFileMiddlewareClient) QueryGinFileMiddlewareToProvisionedHost(gfm *GinFileMiddleware) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := gfm.ID step := sqlgraph.NewStep( sqlgraph.From(ginfilemiddleware.Table, ginfilemiddleware.FieldID, id), @@ -3049,8 +3933,8 @@ func (c *GinFileMiddlewareClient) QueryGinFileMiddlewareToProvisionedHost(gfm *G // QueryGinFileMiddlewareToProvisioningStep queries the GinFileMiddlewareToProvisioningStep edge of a GinFileMiddleware. 
func (c *GinFileMiddlewareClient) QueryGinFileMiddlewareToProvisioningStep(gfm *GinFileMiddleware) *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisioningStepClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := gfm.ID step := sqlgraph.NewStep( sqlgraph.From(ginfilemiddleware.Table, ginfilemiddleware.FieldID, id), @@ -3063,9 +3947,29 @@ func (c *GinFileMiddlewareClient) QueryGinFileMiddlewareToProvisioningStep(gfm * return query } -// Hooks returns the client hooks. -func (c *GinFileMiddlewareClient) Hooks() []Hook { - return c.hooks.GinFileMiddleware +// Hooks returns the client hooks. +func (c *GinFileMiddlewareClient) Hooks() []Hook { + return c.hooks.GinFileMiddleware +} + +// Interceptors returns the client interceptors. +func (c *GinFileMiddlewareClient) Interceptors() []Interceptor { + return c.inters.GinFileMiddleware +} + +func (c *GinFileMiddlewareClient) mutate(ctx context.Context, m *GinFileMiddlewareMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&GinFileMiddlewareCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&GinFileMiddlewareUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&GinFileMiddlewareUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&GinFileMiddlewareDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown GinFileMiddleware mutation op: %q", m.Op()) + } } // HostClient is a client for the Host schema. @@ -3084,7 +3988,13 @@ func (c *HostClient) Use(hooks ...Hook) { c.hooks.Host = append(c.hooks.Host, hooks...) } -// Create returns a create builder for Host. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `host.Intercept(f(g(h())))`. +func (c *HostClient) Intercept(interceptors ...Interceptor) { + c.inters.Host = append(c.inters.Host, interceptors...) +} + +// Create returns a builder for creating a Host entity. func (c *HostClient) Create() *HostCreate { mutation := newHostMutation(c.config, OpCreate) return &HostCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -3095,6 +4005,21 @@ func (c *HostClient) CreateBulk(builders ...*HostCreate) *HostCreateBulk { return &HostCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *HostClient) MapCreateBulk(slice any, setFunc func(*HostCreate, int)) *HostCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &HostCreateBulk{err: fmt.Errorf("calling to HostClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*HostCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &HostCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Host. 
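The MapCreateBulk helper introduced above turns an arbitrary slice into a bulk-create builder, one create builder per element. A minimal sketch using the Host client follows; the hostnames slice and the SetHostname setter are assumptions for illustration (any other required Host fields would have to be set the same way), and the snippet is not part of the generated output.

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// seedHosts builds one HostCreate per hostname via MapCreateBulk and saves
// them in a single bulk operation. SetHostname is assumed from the Host
// schema's hostname field.
func seedHosts(ctx context.Context, client *ent.Client) ([]*ent.Host, error) {
	hostnames := []string{"web01", "db01"}
	return client.Host.MapCreateBulk(hostnames, func(hc *ent.HostCreate, i int) {
		hc.SetHostname(hostnames[i])
	}).Save(ctx)
}
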
func (c *HostClient) Update() *HostUpdate { mutation := newHostMutation(c.config, OpUpdate) @@ -3119,12 +4044,12 @@ func (c *HostClient) Delete() *HostDelete { return &HostDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *HostClient) DeleteOne(h *Host) *HostDeleteOne { return c.DeleteOneID(h.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *HostClient) DeleteOneID(id uuid.UUID) *HostDeleteOne { builder := c.Delete().Where(host.ID(id)) builder.mutation.id = &id @@ -3136,6 +4061,8 @@ func (c *HostClient) DeleteOneID(id uuid.UUID) *HostDeleteOne { func (c *HostClient) Query() *HostQuery { return &HostQuery{ config: c.config, + ctx: &QueryContext{Type: TypeHost}, + inters: c.Interceptors(), } } @@ -3155,8 +4082,8 @@ func (c *HostClient) GetX(ctx context.Context, id uuid.UUID) *Host { // QueryHostToDisk queries the HostToDisk edge of a Host. func (c *HostClient) QueryHostToDisk(h *Host) *DiskQuery { - query := &DiskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DiskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := h.ID step := sqlgraph.NewStep( sqlgraph.From(host.Table, host.FieldID, id), @@ -3171,8 +4098,8 @@ func (c *HostClient) QueryHostToDisk(h *Host) *DiskQuery { // QueryHostToUser queries the HostToUser edge of a Host. func (c *HostClient) QueryHostToUser(h *Host) *UserQuery { - query := &UserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := h.ID step := sqlgraph.NewStep( sqlgraph.From(host.Table, host.FieldID, id), @@ -3187,8 +4114,8 @@ func (c *HostClient) QueryHostToUser(h *Host) *UserQuery { // QueryHostToEnvironment queries the HostToEnvironment edge of a Host. func (c *HostClient) QueryHostToEnvironment(h *Host) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := h.ID step := sqlgraph.NewStep( sqlgraph.From(host.Table, host.FieldID, id), @@ -3203,8 +4130,8 @@ func (c *HostClient) QueryHostToEnvironment(h *Host) *EnvironmentQuery { // QueryHostToIncludedNetwork queries the HostToIncludedNetwork edge of a Host. func (c *HostClient) QueryHostToIncludedNetwork(h *Host) *IncludedNetworkQuery { - query := &IncludedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&IncludedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := h.ID step := sqlgraph.NewStep( sqlgraph.From(host.Table, host.FieldID, id), @@ -3219,8 +4146,8 @@ func (c *HostClient) QueryHostToIncludedNetwork(h *Host) *IncludedNetworkQuery { // QueryDependOnHostToHostDependency queries the DependOnHostToHostDependency edge of a Host. 
func (c *HostClient) QueryDependOnHostToHostDependency(h *Host) *HostDependencyQuery { - query := &HostDependencyQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostDependencyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := h.ID step := sqlgraph.NewStep( sqlgraph.From(host.Table, host.FieldID, id), @@ -3235,8 +4162,8 @@ func (c *HostClient) QueryDependOnHostToHostDependency(h *Host) *HostDependencyQ // QueryDependByHostToHostDependency queries the DependByHostToHostDependency edge of a Host. func (c *HostClient) QueryDependByHostToHostDependency(h *Host) *HostDependencyQuery { - query := &HostDependencyQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostDependencyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := h.ID step := sqlgraph.NewStep( sqlgraph.From(host.Table, host.FieldID, id), @@ -3254,6 +4181,26 @@ func (c *HostClient) Hooks() []Hook { return c.hooks.Host } +// Interceptors returns the client interceptors. +func (c *HostClient) Interceptors() []Interceptor { + return c.inters.Host +} + +func (c *HostClient) mutate(ctx context.Context, m *HostMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&HostCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&HostUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&HostUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&HostDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Host mutation op: %q", m.Op()) + } +} + // HostDependencyClient is a client for the HostDependency schema. type HostDependencyClient struct { config @@ -3270,7 +4217,13 @@ func (c *HostDependencyClient) Use(hooks ...Hook) { c.hooks.HostDependency = append(c.hooks.HostDependency, hooks...) } -// Create returns a create builder for HostDependency. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `hostdependency.Intercept(f(g(h())))`. +func (c *HostDependencyClient) Intercept(interceptors ...Interceptor) { + c.inters.HostDependency = append(c.inters.HostDependency, interceptors...) +} + +// Create returns a builder for creating a HostDependency entity. func (c *HostDependencyClient) Create() *HostDependencyCreate { mutation := newHostDependencyMutation(c.config, OpCreate) return &HostDependencyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -3281,6 +4234,21 @@ func (c *HostDependencyClient) CreateBulk(builders ...*HostDependencyCreate) *Ho return &HostDependencyCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *HostDependencyClient) MapCreateBulk(slice any, setFunc func(*HostDependencyCreate, int)) *HostDependencyCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &HostDependencyCreateBulk{err: fmt.Errorf("calling to HostDependencyClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*HostDependencyCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &HostDependencyCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for HostDependency. func (c *HostDependencyClient) Update() *HostDependencyUpdate { mutation := newHostDependencyMutation(c.config, OpUpdate) @@ -3305,12 +4273,12 @@ func (c *HostDependencyClient) Delete() *HostDependencyDelete { return &HostDependencyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *HostDependencyClient) DeleteOne(hd *HostDependency) *HostDependencyDeleteOne { return c.DeleteOneID(hd.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *HostDependencyClient) DeleteOneID(id uuid.UUID) *HostDependencyDeleteOne { builder := c.Delete().Where(hostdependency.ID(id)) builder.mutation.id = &id @@ -3322,6 +4290,8 @@ func (c *HostDependencyClient) DeleteOneID(id uuid.UUID) *HostDependencyDeleteOn func (c *HostDependencyClient) Query() *HostDependencyQuery { return &HostDependencyQuery{ config: c.config, + ctx: &QueryContext{Type: TypeHostDependency}, + inters: c.Interceptors(), } } @@ -3341,8 +4311,8 @@ func (c *HostDependencyClient) GetX(ctx context.Context, id uuid.UUID) *HostDepe // QueryHostDependencyToDependOnHost queries the HostDependencyToDependOnHost edge of a HostDependency. func (c *HostDependencyClient) QueryHostDependencyToDependOnHost(hd *HostDependency) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := hd.ID step := sqlgraph.NewStep( sqlgraph.From(hostdependency.Table, hostdependency.FieldID, id), @@ -3357,8 +4327,8 @@ func (c *HostDependencyClient) QueryHostDependencyToDependOnHost(hd *HostDepende // QueryHostDependencyToDependByHost queries the HostDependencyToDependByHost edge of a HostDependency. func (c *HostDependencyClient) QueryHostDependencyToDependByHost(hd *HostDependency) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := hd.ID step := sqlgraph.NewStep( sqlgraph.From(hostdependency.Table, hostdependency.FieldID, id), @@ -3373,8 +4343,8 @@ func (c *HostDependencyClient) QueryHostDependencyToDependByHost(hd *HostDepende // QueryHostDependencyToNetwork queries the HostDependencyToNetwork edge of a HostDependency. 
func (c *HostDependencyClient) QueryHostDependencyToNetwork(hd *HostDependency) *NetworkQuery { - query := &NetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&NetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := hd.ID step := sqlgraph.NewStep( sqlgraph.From(hostdependency.Table, hostdependency.FieldID, id), @@ -3389,8 +4359,8 @@ func (c *HostDependencyClient) QueryHostDependencyToNetwork(hd *HostDependency) // QueryHostDependencyToEnvironment queries the HostDependencyToEnvironment edge of a HostDependency. func (c *HostDependencyClient) QueryHostDependencyToEnvironment(hd *HostDependency) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := hd.ID step := sqlgraph.NewStep( sqlgraph.From(hostdependency.Table, hostdependency.FieldID, id), @@ -3408,6 +4378,26 @@ func (c *HostDependencyClient) Hooks() []Hook { return c.hooks.HostDependency } +// Interceptors returns the client interceptors. +func (c *HostDependencyClient) Interceptors() []Interceptor { + return c.inters.HostDependency +} + +func (c *HostDependencyClient) mutate(ctx context.Context, m *HostDependencyMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&HostDependencyCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&HostDependencyUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&HostDependencyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&HostDependencyDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown HostDependency mutation op: %q", m.Op()) + } +} + // IdentityClient is a client for the Identity schema. type IdentityClient struct { config @@ -3424,7 +4414,13 @@ func (c *IdentityClient) Use(hooks ...Hook) { c.hooks.Identity = append(c.hooks.Identity, hooks...) } -// Create returns a create builder for Identity. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `identity.Intercept(f(g(h())))`. +func (c *IdentityClient) Intercept(interceptors ...Interceptor) { + c.inters.Identity = append(c.inters.Identity, interceptors...) +} + +// Create returns a builder for creating a Identity entity. func (c *IdentityClient) Create() *IdentityCreate { mutation := newIdentityMutation(c.config, OpCreate) return &IdentityCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -3435,6 +4431,21 @@ func (c *IdentityClient) CreateBulk(builders ...*IdentityCreate) *IdentityCreate return &IdentityCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *IdentityClient) MapCreateBulk(slice any, setFunc func(*IdentityCreate, int)) *IdentityCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &IdentityCreateBulk{err: fmt.Errorf("calling to IdentityClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*IdentityCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &IdentityCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Identity. func (c *IdentityClient) Update() *IdentityUpdate { mutation := newIdentityMutation(c.config, OpUpdate) @@ -3459,12 +4470,12 @@ func (c *IdentityClient) Delete() *IdentityDelete { return &IdentityDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *IdentityClient) DeleteOne(i *Identity) *IdentityDeleteOne { return c.DeleteOneID(i.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *IdentityClient) DeleteOneID(id uuid.UUID) *IdentityDeleteOne { builder := c.Delete().Where(identity.ID(id)) builder.mutation.id = &id @@ -3476,6 +4487,8 @@ func (c *IdentityClient) DeleteOneID(id uuid.UUID) *IdentityDeleteOne { func (c *IdentityClient) Query() *IdentityQuery { return &IdentityQuery{ config: c.config, + ctx: &QueryContext{Type: TypeIdentity}, + inters: c.Interceptors(), } } @@ -3495,8 +4508,8 @@ func (c *IdentityClient) GetX(ctx context.Context, id uuid.UUID) *Identity { // QueryIdentityToEnvironment queries the IdentityToEnvironment edge of a Identity. func (c *IdentityClient) QueryIdentityToEnvironment(i *Identity) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := i.ID step := sqlgraph.NewStep( sqlgraph.From(identity.Table, identity.FieldID, id), @@ -3514,6 +4527,26 @@ func (c *IdentityClient) Hooks() []Hook { return c.hooks.Identity } +// Interceptors returns the client interceptors. +func (c *IdentityClient) Interceptors() []Interceptor { + return c.inters.Identity +} + +func (c *IdentityClient) mutate(ctx context.Context, m *IdentityMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&IdentityCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&IdentityUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&IdentityUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&IdentityDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Identity mutation op: %q", m.Op()) + } +} + // IncludedNetworkClient is a client for the IncludedNetwork schema. type IncludedNetworkClient struct { config @@ -3530,7 +4563,13 @@ func (c *IncludedNetworkClient) Use(hooks ...Hook) { c.hooks.IncludedNetwork = append(c.hooks.IncludedNetwork, hooks...) } -// Create returns a create builder for IncludedNetwork. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `includednetwork.Intercept(f(g(h())))`. 
+func (c *IncludedNetworkClient) Intercept(interceptors ...Interceptor) { + c.inters.IncludedNetwork = append(c.inters.IncludedNetwork, interceptors...) +} + +// Create returns a builder for creating a IncludedNetwork entity. func (c *IncludedNetworkClient) Create() *IncludedNetworkCreate { mutation := newIncludedNetworkMutation(c.config, OpCreate) return &IncludedNetworkCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -3541,6 +4580,21 @@ func (c *IncludedNetworkClient) CreateBulk(builders ...*IncludedNetworkCreate) * return &IncludedNetworkCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *IncludedNetworkClient) MapCreateBulk(slice any, setFunc func(*IncludedNetworkCreate, int)) *IncludedNetworkCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &IncludedNetworkCreateBulk{err: fmt.Errorf("calling to IncludedNetworkClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*IncludedNetworkCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &IncludedNetworkCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for IncludedNetwork. func (c *IncludedNetworkClient) Update() *IncludedNetworkUpdate { mutation := newIncludedNetworkMutation(c.config, OpUpdate) @@ -3565,12 +4619,12 @@ func (c *IncludedNetworkClient) Delete() *IncludedNetworkDelete { return &IncludedNetworkDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *IncludedNetworkClient) DeleteOne(in *IncludedNetwork) *IncludedNetworkDeleteOne { return c.DeleteOneID(in.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *IncludedNetworkClient) DeleteOneID(id uuid.UUID) *IncludedNetworkDeleteOne { builder := c.Delete().Where(includednetwork.ID(id)) builder.mutation.id = &id @@ -3582,6 +4636,8 @@ func (c *IncludedNetworkClient) DeleteOneID(id uuid.UUID) *IncludedNetworkDelete func (c *IncludedNetworkClient) Query() *IncludedNetworkQuery { return &IncludedNetworkQuery{ config: c.config, + ctx: &QueryContext{Type: TypeIncludedNetwork}, + inters: c.Interceptors(), } } @@ -3601,8 +4657,8 @@ func (c *IncludedNetworkClient) GetX(ctx context.Context, id uuid.UUID) *Include // QueryIncludedNetworkToTag queries the IncludedNetworkToTag edge of a IncludedNetwork. func (c *IncludedNetworkClient) QueryIncludedNetworkToTag(in *IncludedNetwork) *TagQuery { - query := &TagQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TagClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := in.ID step := sqlgraph.NewStep( sqlgraph.From(includednetwork.Table, includednetwork.FieldID, id), @@ -3617,8 +4673,8 @@ func (c *IncludedNetworkClient) QueryIncludedNetworkToTag(in *IncludedNetwork) * // QueryIncludedNetworkToHost queries the IncludedNetworkToHost edge of a IncludedNetwork. 
func (c *IncludedNetworkClient) QueryIncludedNetworkToHost(in *IncludedNetwork) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := in.ID step := sqlgraph.NewStep( sqlgraph.From(includednetwork.Table, includednetwork.FieldID, id), @@ -3633,8 +4689,8 @@ func (c *IncludedNetworkClient) QueryIncludedNetworkToHost(in *IncludedNetwork) // QueryIncludedNetworkToNetwork queries the IncludedNetworkToNetwork edge of a IncludedNetwork. func (c *IncludedNetworkClient) QueryIncludedNetworkToNetwork(in *IncludedNetwork) *NetworkQuery { - query := &NetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&NetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := in.ID step := sqlgraph.NewStep( sqlgraph.From(includednetwork.Table, includednetwork.FieldID, id), @@ -3649,8 +4705,8 @@ func (c *IncludedNetworkClient) QueryIncludedNetworkToNetwork(in *IncludedNetwor // QueryIncludedNetworkToEnvironment queries the IncludedNetworkToEnvironment edge of a IncludedNetwork. func (c *IncludedNetworkClient) QueryIncludedNetworkToEnvironment(in *IncludedNetwork) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := in.ID step := sqlgraph.NewStep( sqlgraph.From(includednetwork.Table, includednetwork.FieldID, id), @@ -3668,6 +4724,26 @@ func (c *IncludedNetworkClient) Hooks() []Hook { return c.hooks.IncludedNetwork } +// Interceptors returns the client interceptors. +func (c *IncludedNetworkClient) Interceptors() []Interceptor { + return c.inters.IncludedNetwork +} + +func (c *IncludedNetworkClient) mutate(ctx context.Context, m *IncludedNetworkMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&IncludedNetworkCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&IncludedNetworkUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&IncludedNetworkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&IncludedNetworkDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown IncludedNetwork mutation op: %q", m.Op()) + } +} + // NetworkClient is a client for the Network schema. type NetworkClient struct { config @@ -3684,7 +4760,13 @@ func (c *NetworkClient) Use(hooks ...Hook) { c.hooks.Network = append(c.hooks.Network, hooks...) } -// Create returns a create builder for Network. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `network.Intercept(f(g(h())))`. +func (c *NetworkClient) Intercept(interceptors ...Interceptor) { + c.inters.Network = append(c.inters.Network, interceptors...) +} + +// Create returns a builder for creating a Network entity. 
func (c *NetworkClient) Create() *NetworkCreate { mutation := newNetworkMutation(c.config, OpCreate) return &NetworkCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -3695,6 +4777,21 @@ func (c *NetworkClient) CreateBulk(builders ...*NetworkCreate) *NetworkCreateBul return &NetworkCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *NetworkClient) MapCreateBulk(slice any, setFunc func(*NetworkCreate, int)) *NetworkCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &NetworkCreateBulk{err: fmt.Errorf("calling to NetworkClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*NetworkCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &NetworkCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Network. func (c *NetworkClient) Update() *NetworkUpdate { mutation := newNetworkMutation(c.config, OpUpdate) @@ -3719,12 +4816,12 @@ func (c *NetworkClient) Delete() *NetworkDelete { return &NetworkDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *NetworkClient) DeleteOne(n *Network) *NetworkDeleteOne { return c.DeleteOneID(n.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *NetworkClient) DeleteOneID(id uuid.UUID) *NetworkDeleteOne { builder := c.Delete().Where(network.ID(id)) builder.mutation.id = &id @@ -3736,6 +4833,8 @@ func (c *NetworkClient) DeleteOneID(id uuid.UUID) *NetworkDeleteOne { func (c *NetworkClient) Query() *NetworkQuery { return &NetworkQuery{ config: c.config, + ctx: &QueryContext{Type: TypeNetwork}, + inters: c.Interceptors(), } } @@ -3755,8 +4854,8 @@ func (c *NetworkClient) GetX(ctx context.Context, id uuid.UUID) *Network { // QueryNetworkToEnvironment queries the NetworkToEnvironment edge of a Network. func (c *NetworkClient) QueryNetworkToEnvironment(n *Network) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := n.ID step := sqlgraph.NewStep( sqlgraph.From(network.Table, network.FieldID, id), @@ -3771,8 +4870,8 @@ func (c *NetworkClient) QueryNetworkToEnvironment(n *Network) *EnvironmentQuery // QueryNetworkToHostDependency queries the NetworkToHostDependency edge of a Network. func (c *NetworkClient) QueryNetworkToHostDependency(n *Network) *HostDependencyQuery { - query := &HostDependencyQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostDependencyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := n.ID step := sqlgraph.NewStep( sqlgraph.From(network.Table, network.FieldID, id), @@ -3787,8 +4886,8 @@ func (c *NetworkClient) QueryNetworkToHostDependency(n *Network) *HostDependency // QueryNetworkToIncludedNetwork queries the NetworkToIncludedNetwork edge of a Network. 
func (c *NetworkClient) QueryNetworkToIncludedNetwork(n *Network) *IncludedNetworkQuery { - query := &IncludedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&IncludedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := n.ID step := sqlgraph.NewStep( sqlgraph.From(network.Table, network.FieldID, id), @@ -3806,6 +4905,26 @@ func (c *NetworkClient) Hooks() []Hook { return c.hooks.Network } +// Interceptors returns the client interceptors. +func (c *NetworkClient) Interceptors() []Interceptor { + return c.inters.Network +} + +func (c *NetworkClient) mutate(ctx context.Context, m *NetworkMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&NetworkCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&NetworkUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&NetworkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&NetworkDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Network mutation op: %q", m.Op()) + } +} + // PlanClient is a client for the Plan schema. type PlanClient struct { config @@ -3822,7 +4941,13 @@ func (c *PlanClient) Use(hooks ...Hook) { c.hooks.Plan = append(c.hooks.Plan, hooks...) } -// Create returns a create builder for Plan. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `plan.Intercept(f(g(h())))`. +func (c *PlanClient) Intercept(interceptors ...Interceptor) { + c.inters.Plan = append(c.inters.Plan, interceptors...) +} + +// Create returns a builder for creating a Plan entity. func (c *PlanClient) Create() *PlanCreate { mutation := newPlanMutation(c.config, OpCreate) return &PlanCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -3833,6 +4958,21 @@ func (c *PlanClient) CreateBulk(builders ...*PlanCreate) *PlanCreateBulk { return &PlanCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PlanClient) MapCreateBulk(slice any, setFunc func(*PlanCreate, int)) *PlanCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PlanCreateBulk{err: fmt.Errorf("calling to PlanClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PlanCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PlanCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Plan. func (c *PlanClient) Update() *PlanUpdate { mutation := newPlanMutation(c.config, OpUpdate) @@ -3857,12 +4997,12 @@ func (c *PlanClient) Delete() *PlanDelete { return &PlanDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *PlanClient) DeleteOne(pl *Plan) *PlanDeleteOne { return c.DeleteOneID(pl.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. 
func (c *PlanClient) DeleteOneID(id uuid.UUID) *PlanDeleteOne { builder := c.Delete().Where(plan.ID(id)) builder.mutation.id = &id @@ -3874,6 +5014,8 @@ func (c *PlanClient) DeleteOneID(id uuid.UUID) *PlanDeleteOne { func (c *PlanClient) Query() *PlanQuery { return &PlanQuery{ config: c.config, + ctx: &QueryContext{Type: TypePlan}, + inters: c.Interceptors(), } } @@ -3893,8 +5035,8 @@ func (c *PlanClient) GetX(ctx context.Context, id uuid.UUID) *Plan { // QueryPrevPlan queries the PrevPlan edge of a Plan. func (c *PlanClient) QueryPrevPlan(pl *Plan) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -3909,8 +5051,8 @@ func (c *PlanClient) QueryPrevPlan(pl *Plan) *PlanQuery { // QueryNextPlan queries the NextPlan edge of a Plan. func (c *PlanClient) QueryNextPlan(pl *Plan) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -3925,8 +5067,8 @@ func (c *PlanClient) QueryNextPlan(pl *Plan) *PlanQuery { // QueryPlanToBuild queries the PlanToBuild edge of a Plan. func (c *PlanClient) QueryPlanToBuild(pl *Plan) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -3941,8 +5083,8 @@ func (c *PlanClient) QueryPlanToBuild(pl *Plan) *BuildQuery { // QueryPlanToTeam queries the PlanToTeam edge of a Plan. func (c *PlanClient) QueryPlanToTeam(pl *Plan) *TeamQuery { - query := &TeamQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TeamClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -3957,8 +5099,8 @@ func (c *PlanClient) QueryPlanToTeam(pl *Plan) *TeamQuery { // QueryPlanToProvisionedNetwork queries the PlanToProvisionedNetwork edge of a Plan. func (c *PlanClient) QueryPlanToProvisionedNetwork(pl *Plan) *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -3973,8 +5115,8 @@ func (c *PlanClient) QueryPlanToProvisionedNetwork(pl *Plan) *ProvisionedNetwork // QueryPlanToProvisionedHost queries the PlanToProvisionedHost edge of a Plan. 
func (c *PlanClient) QueryPlanToProvisionedHost(pl *Plan) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -3989,8 +5131,8 @@ func (c *PlanClient) QueryPlanToProvisionedHost(pl *Plan) *ProvisionedHostQuery // QueryPlanToProvisioningStep queries the PlanToProvisioningStep edge of a Plan. func (c *PlanClient) QueryPlanToProvisioningStep(pl *Plan) *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisioningStepClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -4005,8 +5147,8 @@ func (c *PlanClient) QueryPlanToProvisioningStep(pl *Plan) *ProvisioningStepQuer // QueryPlanToStatus queries the PlanToStatus edge of a Plan. func (c *PlanClient) QueryPlanToStatus(pl *Plan) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -4021,8 +5163,8 @@ func (c *PlanClient) QueryPlanToStatus(pl *Plan) *StatusQuery { // QueryPlanToPlanDiffs queries the PlanToPlanDiffs edge of a Plan. func (c *PlanClient) QueryPlanToPlanDiffs(pl *Plan) *PlanDiffQuery { - query := &PlanDiffQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanDiffClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pl.ID step := sqlgraph.NewStep( sqlgraph.From(plan.Table, plan.FieldID, id), @@ -4040,6 +5182,26 @@ func (c *PlanClient) Hooks() []Hook { return c.hooks.Plan } +// Interceptors returns the client interceptors. +func (c *PlanClient) Interceptors() []Interceptor { + return c.inters.Plan +} + +func (c *PlanClient) mutate(ctx context.Context, m *PlanMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PlanCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PlanUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PlanUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PlanDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Plan mutation op: %q", m.Op()) + } +} + // PlanDiffClient is a client for the PlanDiff schema. type PlanDiffClient struct { config @@ -4056,7 +5218,13 @@ func (c *PlanDiffClient) Use(hooks ...Hook) { c.hooks.PlanDiff = append(c.hooks.PlanDiff, hooks...) } -// Create returns a create builder for PlanDiff. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `plandiff.Intercept(f(g(h())))`. +func (c *PlanDiffClient) Intercept(interceptors ...Interceptor) { + c.inters.PlanDiff = append(c.inters.PlanDiff, interceptors...) 
+} + +// Create returns a builder for creating a PlanDiff entity. func (c *PlanDiffClient) Create() *PlanDiffCreate { mutation := newPlanDiffMutation(c.config, OpCreate) return &PlanDiffCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -4067,6 +5235,21 @@ func (c *PlanDiffClient) CreateBulk(builders ...*PlanDiffCreate) *PlanDiffCreate return &PlanDiffCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PlanDiffClient) MapCreateBulk(slice any, setFunc func(*PlanDiffCreate, int)) *PlanDiffCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PlanDiffCreateBulk{err: fmt.Errorf("calling to PlanDiffClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PlanDiffCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PlanDiffCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for PlanDiff. func (c *PlanDiffClient) Update() *PlanDiffUpdate { mutation := newPlanDiffMutation(c.config, OpUpdate) @@ -4091,12 +5274,12 @@ func (c *PlanDiffClient) Delete() *PlanDiffDelete { return &PlanDiffDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *PlanDiffClient) DeleteOne(pd *PlanDiff) *PlanDiffDeleteOne { return c.DeleteOneID(pd.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *PlanDiffClient) DeleteOneID(id uuid.UUID) *PlanDiffDeleteOne { builder := c.Delete().Where(plandiff.ID(id)) builder.mutation.id = &id @@ -4108,6 +5291,8 @@ func (c *PlanDiffClient) DeleteOneID(id uuid.UUID) *PlanDiffDeleteOne { func (c *PlanDiffClient) Query() *PlanDiffQuery { return &PlanDiffQuery{ config: c.config, + ctx: &QueryContext{Type: TypePlanDiff}, + inters: c.Interceptors(), } } @@ -4127,8 +5312,8 @@ func (c *PlanDiffClient) GetX(ctx context.Context, id uuid.UUID) *PlanDiff { // QueryPlanDiffToBuildCommit queries the PlanDiffToBuildCommit edge of a PlanDiff. func (c *PlanDiffClient) QueryPlanDiffToBuildCommit(pd *PlanDiff) *BuildCommitQuery { - query := &BuildCommitQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pd.ID step := sqlgraph.NewStep( sqlgraph.From(plandiff.Table, plandiff.FieldID, id), @@ -4143,8 +5328,8 @@ func (c *PlanDiffClient) QueryPlanDiffToBuildCommit(pd *PlanDiff) *BuildCommitQu // QueryPlanDiffToPlan queries the PlanDiffToPlan edge of a PlanDiff. func (c *PlanDiffClient) QueryPlanDiffToPlan(pd *PlanDiff) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pd.ID step := sqlgraph.NewStep( sqlgraph.From(plandiff.Table, plandiff.FieldID, id), @@ -4162,6 +5347,26 @@ func (c *PlanDiffClient) Hooks() []Hook { return c.hooks.PlanDiff } +// Interceptors returns the client interceptors. 
+func (c *PlanDiffClient) Interceptors() []Interceptor { + return c.inters.PlanDiff +} + +func (c *PlanDiffClient) mutate(ctx context.Context, m *PlanDiffMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PlanDiffCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PlanDiffUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PlanDiffUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PlanDiffDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PlanDiff mutation op: %q", m.Op()) + } +} + // ProvisionedHostClient is a client for the ProvisionedHost schema. type ProvisionedHostClient struct { config @@ -4178,7 +5383,13 @@ func (c *ProvisionedHostClient) Use(hooks ...Hook) { c.hooks.ProvisionedHost = append(c.hooks.ProvisionedHost, hooks...) } -// Create returns a create builder for ProvisionedHost. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `provisionedhost.Intercept(f(g(h())))`. +func (c *ProvisionedHostClient) Intercept(interceptors ...Interceptor) { + c.inters.ProvisionedHost = append(c.inters.ProvisionedHost, interceptors...) +} + +// Create returns a builder for creating a ProvisionedHost entity. func (c *ProvisionedHostClient) Create() *ProvisionedHostCreate { mutation := newProvisionedHostMutation(c.config, OpCreate) return &ProvisionedHostCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -4189,6 +5400,21 @@ func (c *ProvisionedHostClient) CreateBulk(builders ...*ProvisionedHostCreate) * return &ProvisionedHostCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ProvisionedHostClient) MapCreateBulk(slice any, setFunc func(*ProvisionedHostCreate, int)) *ProvisionedHostCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ProvisionedHostCreateBulk{err: fmt.Errorf("calling to ProvisionedHostClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ProvisionedHostCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ProvisionedHostCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ProvisionedHost. func (c *ProvisionedHostClient) Update() *ProvisionedHostUpdate { mutation := newProvisionedHostMutation(c.config, OpUpdate) @@ -4213,12 +5439,12 @@ func (c *ProvisionedHostClient) Delete() *ProvisionedHostDelete { return &ProvisionedHostDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *ProvisionedHostClient) DeleteOne(ph *ProvisionedHost) *ProvisionedHostDeleteOne { return c.DeleteOneID(ph.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. 
func (c *ProvisionedHostClient) DeleteOneID(id uuid.UUID) *ProvisionedHostDeleteOne { builder := c.Delete().Where(provisionedhost.ID(id)) builder.mutation.id = &id @@ -4230,6 +5456,8 @@ func (c *ProvisionedHostClient) DeleteOneID(id uuid.UUID) *ProvisionedHostDelete func (c *ProvisionedHostClient) Query() *ProvisionedHostQuery { return &ProvisionedHostQuery{ config: c.config, + ctx: &QueryContext{Type: TypeProvisionedHost}, + inters: c.Interceptors(), } } @@ -4249,8 +5477,8 @@ func (c *ProvisionedHostClient) GetX(ctx context.Context, id uuid.UUID) *Provisi // QueryProvisionedHostToStatus queries the ProvisionedHostToStatus edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToStatus(ph *ProvisionedHost) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4265,8 +5493,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToStatus(ph *ProvisionedHost // QueryProvisionedHostToProvisionedNetwork queries the ProvisionedHostToProvisionedNetwork edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToProvisionedNetwork(ph *ProvisionedHost) *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4281,8 +5509,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToProvisionedNetwork(ph *Pro // QueryProvisionedHostToHost queries the ProvisionedHostToHost edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToHost(ph *ProvisionedHost) *HostQuery { - query := &HostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&HostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4297,8 +5525,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToHost(ph *ProvisionedHost) // QueryProvisionedHostToEndStepPlan queries the ProvisionedHostToEndStepPlan edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToEndStepPlan(ph *ProvisionedHost) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4313,8 +5541,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToEndStepPlan(ph *Provisione // QueryProvisionedHostToBuild queries the ProvisionedHostToBuild edge of a ProvisionedHost. 
func (c *ProvisionedHostClient) QueryProvisionedHostToBuild(ph *ProvisionedHost) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4329,8 +5557,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToBuild(ph *ProvisionedHost) // QueryProvisionedHostToProvisioningStep queries the ProvisionedHostToProvisioningStep edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToProvisioningStep(ph *ProvisionedHost) *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisioningStepClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4345,8 +5573,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToProvisioningStep(ph *Provi // QueryProvisionedHostToAgentStatus queries the ProvisionedHostToAgentStatus edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToAgentStatus(ph *ProvisionedHost) *AgentStatusQuery { - query := &AgentStatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AgentStatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4361,8 +5589,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToAgentStatus(ph *Provisione // QueryProvisionedHostToAgentTask queries the ProvisionedHostToAgentTask edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToAgentTask(ph *ProvisionedHost) *AgentTaskQuery { - query := &AgentTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AgentTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4377,8 +5605,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToAgentTask(ph *ProvisionedH // QueryProvisionedHostToPlan queries the ProvisionedHostToPlan edge of a ProvisionedHost. func (c *ProvisionedHostClient) QueryProvisionedHostToPlan(ph *ProvisionedHost) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4393,8 +5621,8 @@ func (c *ProvisionedHostClient) QueryProvisionedHostToPlan(ph *ProvisionedHost) // QueryProvisionedHostToGinFileMiddleware queries the ProvisionedHostToGinFileMiddleware edge of a ProvisionedHost. 
func (c *ProvisionedHostClient) QueryProvisionedHostToGinFileMiddleware(ph *ProvisionedHost) *GinFileMiddlewareQuery { - query := &GinFileMiddlewareQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&GinFileMiddlewareClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ph.ID step := sqlgraph.NewStep( sqlgraph.From(provisionedhost.Table, provisionedhost.FieldID, id), @@ -4412,6 +5640,26 @@ func (c *ProvisionedHostClient) Hooks() []Hook { return c.hooks.ProvisionedHost } +// Interceptors returns the client interceptors. +func (c *ProvisionedHostClient) Interceptors() []Interceptor { + return c.inters.ProvisionedHost +} + +func (c *ProvisionedHostClient) mutate(ctx context.Context, m *ProvisionedHostMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ProvisionedHostCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ProvisionedHostUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ProvisionedHostUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ProvisionedHostDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ProvisionedHost mutation op: %q", m.Op()) + } +} + // ProvisionedNetworkClient is a client for the ProvisionedNetwork schema. type ProvisionedNetworkClient struct { config @@ -4428,7 +5676,13 @@ func (c *ProvisionedNetworkClient) Use(hooks ...Hook) { c.hooks.ProvisionedNetwork = append(c.hooks.ProvisionedNetwork, hooks...) } -// Create returns a create builder for ProvisionedNetwork. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `provisionednetwork.Intercept(f(g(h())))`. +func (c *ProvisionedNetworkClient) Intercept(interceptors ...Interceptor) { + c.inters.ProvisionedNetwork = append(c.inters.ProvisionedNetwork, interceptors...) +} + +// Create returns a builder for creating a ProvisionedNetwork entity. func (c *ProvisionedNetworkClient) Create() *ProvisionedNetworkCreate { mutation := newProvisionedNetworkMutation(c.config, OpCreate) return &ProvisionedNetworkCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -4439,6 +5693,21 @@ func (c *ProvisionedNetworkClient) CreateBulk(builders ...*ProvisionedNetworkCre return &ProvisionedNetworkCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ProvisionedNetworkClient) MapCreateBulk(slice any, setFunc func(*ProvisionedNetworkCreate, int)) *ProvisionedNetworkCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ProvisionedNetworkCreateBulk{err: fmt.Errorf("calling to ProvisionedNetworkClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ProvisionedNetworkCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ProvisionedNetworkCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ProvisionedNetwork. 
func (c *ProvisionedNetworkClient) Update() *ProvisionedNetworkUpdate { mutation := newProvisionedNetworkMutation(c.config, OpUpdate) @@ -4463,12 +5732,12 @@ func (c *ProvisionedNetworkClient) Delete() *ProvisionedNetworkDelete { return &ProvisionedNetworkDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *ProvisionedNetworkClient) DeleteOne(pn *ProvisionedNetwork) *ProvisionedNetworkDeleteOne { return c.DeleteOneID(pn.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *ProvisionedNetworkClient) DeleteOneID(id uuid.UUID) *ProvisionedNetworkDeleteOne { builder := c.Delete().Where(provisionednetwork.ID(id)) builder.mutation.id = &id @@ -4480,6 +5749,8 @@ func (c *ProvisionedNetworkClient) DeleteOneID(id uuid.UUID) *ProvisionedNetwork func (c *ProvisionedNetworkClient) Query() *ProvisionedNetworkQuery { return &ProvisionedNetworkQuery{ config: c.config, + ctx: &QueryContext{Type: TypeProvisionedNetwork}, + inters: c.Interceptors(), } } @@ -4499,8 +5770,8 @@ func (c *ProvisionedNetworkClient) GetX(ctx context.Context, id uuid.UUID) *Prov // QueryProvisionedNetworkToStatus queries the ProvisionedNetworkToStatus edge of a ProvisionedNetwork. func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToStatus(pn *ProvisionedNetwork) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pn.ID step := sqlgraph.NewStep( sqlgraph.From(provisionednetwork.Table, provisionednetwork.FieldID, id), @@ -4515,8 +5786,8 @@ func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToStatus(pn *Provision // QueryProvisionedNetworkToNetwork queries the ProvisionedNetworkToNetwork edge of a ProvisionedNetwork. func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToNetwork(pn *ProvisionedNetwork) *NetworkQuery { - query := &NetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&NetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pn.ID step := sqlgraph.NewStep( sqlgraph.From(provisionednetwork.Table, provisionednetwork.FieldID, id), @@ -4531,8 +5802,8 @@ func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToNetwork(pn *Provisio // QueryProvisionedNetworkToBuild queries the ProvisionedNetworkToBuild edge of a ProvisionedNetwork. func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToBuild(pn *ProvisionedNetwork) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pn.ID step := sqlgraph.NewStep( sqlgraph.From(provisionednetwork.Table, provisionednetwork.FieldID, id), @@ -4547,8 +5818,8 @@ func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToBuild(pn *Provisione // QueryProvisionedNetworkToTeam queries the ProvisionedNetworkToTeam edge of a ProvisionedNetwork. 
func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToTeam(pn *ProvisionedNetwork) *TeamQuery { - query := &TeamQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TeamClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pn.ID step := sqlgraph.NewStep( sqlgraph.From(provisionednetwork.Table, provisionednetwork.FieldID, id), @@ -4563,8 +5834,8 @@ func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToTeam(pn *Provisioned // QueryProvisionedNetworkToProvisionedHost queries the ProvisionedNetworkToProvisionedHost edge of a ProvisionedNetwork. func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToProvisionedHost(pn *ProvisionedNetwork) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pn.ID step := sqlgraph.NewStep( sqlgraph.From(provisionednetwork.Table, provisionednetwork.FieldID, id), @@ -4579,8 +5850,8 @@ func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToProvisionedHost(pn * // QueryProvisionedNetworkToPlan queries the ProvisionedNetworkToPlan edge of a ProvisionedNetwork. func (c *ProvisionedNetworkClient) QueryProvisionedNetworkToPlan(pn *ProvisionedNetwork) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pn.ID step := sqlgraph.NewStep( sqlgraph.From(provisionednetwork.Table, provisionednetwork.FieldID, id), @@ -4598,6 +5869,26 @@ func (c *ProvisionedNetworkClient) Hooks() []Hook { return c.hooks.ProvisionedNetwork } +// Interceptors returns the client interceptors. +func (c *ProvisionedNetworkClient) Interceptors() []Interceptor { + return c.inters.ProvisionedNetwork +} + +func (c *ProvisionedNetworkClient) mutate(ctx context.Context, m *ProvisionedNetworkMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ProvisionedNetworkCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ProvisionedNetworkUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ProvisionedNetworkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ProvisionedNetworkDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ProvisionedNetwork mutation op: %q", m.Op()) + } +} + // ProvisioningStepClient is a client for the ProvisioningStep schema. type ProvisioningStepClient struct { config @@ -4614,7 +5905,13 @@ func (c *ProvisioningStepClient) Use(hooks ...Hook) { c.hooks.ProvisioningStep = append(c.hooks.ProvisioningStep, hooks...) } -// Create returns a create builder for ProvisioningStep. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `provisioningstep.Intercept(f(g(h())))`. +func (c *ProvisioningStepClient) Intercept(interceptors ...Interceptor) { + c.inters.ProvisioningStep = append(c.inters.ProvisioningStep, interceptors...) +} + +// Create returns a builder for creating a ProvisioningStep entity. 
func (c *ProvisioningStepClient) Create() *ProvisioningStepCreate { mutation := newProvisioningStepMutation(c.config, OpCreate) return &ProvisioningStepCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -4625,6 +5922,21 @@ func (c *ProvisioningStepClient) CreateBulk(builders ...*ProvisioningStepCreate) return &ProvisioningStepCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ProvisioningStepClient) MapCreateBulk(slice any, setFunc func(*ProvisioningStepCreate, int)) *ProvisioningStepCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ProvisioningStepCreateBulk{err: fmt.Errorf("calling to ProvisioningStepClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ProvisioningStepCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ProvisioningStepCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ProvisioningStep. func (c *ProvisioningStepClient) Update() *ProvisioningStepUpdate { mutation := newProvisioningStepMutation(c.config, OpUpdate) @@ -4649,12 +5961,12 @@ func (c *ProvisioningStepClient) Delete() *ProvisioningStepDelete { return &ProvisioningStepDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *ProvisioningStepClient) DeleteOne(ps *ProvisioningStep) *ProvisioningStepDeleteOne { return c.DeleteOneID(ps.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *ProvisioningStepClient) DeleteOneID(id uuid.UUID) *ProvisioningStepDeleteOne { builder := c.Delete().Where(provisioningstep.ID(id)) builder.mutation.id = &id @@ -4666,6 +5978,8 @@ func (c *ProvisioningStepClient) DeleteOneID(id uuid.UUID) *ProvisioningStepDele func (c *ProvisioningStepClient) Query() *ProvisioningStepQuery { return &ProvisioningStepQuery{ config: c.config, + ctx: &QueryContext{Type: TypeProvisioningStep}, + inters: c.Interceptors(), } } @@ -4685,8 +5999,8 @@ func (c *ProvisioningStepClient) GetX(ctx context.Context, id uuid.UUID) *Provis // QueryProvisioningStepToStatus queries the ProvisioningStepToStatus edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToStatus(ps *ProvisioningStep) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4701,8 +6015,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToStatus(ps *ProvisioningS // QueryProvisioningStepToProvisionedHost queries the ProvisioningStepToProvisionedHost edge of a ProvisioningStep. 
func (c *ProvisioningStepClient) QueryProvisioningStepToProvisionedHost(ps *ProvisioningStep) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4717,8 +6031,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToProvisionedHost(ps *Prov // QueryProvisioningStepToScript queries the ProvisioningStepToScript edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToScript(ps *ProvisioningStep) *ScriptQuery { - query := &ScriptQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ScriptClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4733,8 +6047,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToScript(ps *ProvisioningS // QueryProvisioningStepToCommand queries the ProvisioningStepToCommand edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToCommand(ps *ProvisioningStep) *CommandQuery { - query := &CommandQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&CommandClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4749,8 +6063,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToCommand(ps *Provisioning // QueryProvisioningStepToDNSRecord queries the ProvisioningStepToDNSRecord edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToDNSRecord(ps *ProvisioningStep) *DNSRecordQuery { - query := &DNSRecordQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DNSRecordClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4765,8 +6079,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToDNSRecord(ps *Provisioni // QueryProvisioningStepToFileDelete queries the ProvisioningStepToFileDelete edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToFileDelete(ps *ProvisioningStep) *FileDeleteQuery { - query := &FileDeleteQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FileDeleteClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4781,8 +6095,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToFileDelete(ps *Provision // QueryProvisioningStepToFileDownload queries the ProvisioningStepToFileDownload edge of a ProvisioningStep. 
func (c *ProvisioningStepClient) QueryProvisioningStepToFileDownload(ps *ProvisioningStep) *FileDownloadQuery { - query := &FileDownloadQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FileDownloadClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4797,8 +6111,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToFileDownload(ps *Provisi // QueryProvisioningStepToFileExtract queries the ProvisioningStepToFileExtract edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToFileExtract(ps *ProvisioningStep) *FileExtractQuery { - query := &FileExtractQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FileExtractClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4813,8 +6127,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToFileExtract(ps *Provisio // QueryProvisioningStepToAnsible queries the ProvisioningStepToAnsible edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToAnsible(ps *ProvisioningStep) *AnsibleQuery { - query := &AnsibleQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AnsibleClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4829,8 +6143,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToAnsible(ps *Provisioning // QueryProvisioningStepToPlan queries the ProvisioningStepToPlan edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToPlan(ps *ProvisioningStep) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4845,8 +6159,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToPlan(ps *ProvisioningSte // QueryProvisioningStepToAgentTask queries the ProvisioningStepToAgentTask edge of a ProvisioningStep. func (c *ProvisioningStepClient) QueryProvisioningStepToAgentTask(ps *ProvisioningStep) *AgentTaskQuery { - query := &AgentTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AgentTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4861,8 +6175,8 @@ func (c *ProvisioningStepClient) QueryProvisioningStepToAgentTask(ps *Provisioni // QueryProvisioningStepToGinFileMiddleware queries the ProvisioningStepToGinFileMiddleware edge of a ProvisioningStep. 
func (c *ProvisioningStepClient) QueryProvisioningStepToGinFileMiddleware(ps *ProvisioningStep) *GinFileMiddlewareQuery { - query := &GinFileMiddlewareQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&GinFileMiddlewareClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := ps.ID step := sqlgraph.NewStep( sqlgraph.From(provisioningstep.Table, provisioningstep.FieldID, id), @@ -4880,6 +6194,26 @@ func (c *ProvisioningStepClient) Hooks() []Hook { return c.hooks.ProvisioningStep } +// Interceptors returns the client interceptors. +func (c *ProvisioningStepClient) Interceptors() []Interceptor { + return c.inters.ProvisioningStep +} + +func (c *ProvisioningStepClient) mutate(ctx context.Context, m *ProvisioningStepMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ProvisioningStepCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ProvisioningStepUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ProvisioningStepUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ProvisioningStepDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ProvisioningStep mutation op: %q", m.Op()) + } +} + // RepoCommitClient is a client for the RepoCommit schema. type RepoCommitClient struct { config @@ -4896,7 +6230,13 @@ func (c *RepoCommitClient) Use(hooks ...Hook) { c.hooks.RepoCommit = append(c.hooks.RepoCommit, hooks...) } -// Create returns a create builder for RepoCommit. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `repocommit.Intercept(f(g(h())))`. +func (c *RepoCommitClient) Intercept(interceptors ...Interceptor) { + c.inters.RepoCommit = append(c.inters.RepoCommit, interceptors...) +} + +// Create returns a builder for creating a RepoCommit entity. func (c *RepoCommitClient) Create() *RepoCommitCreate { mutation := newRepoCommitMutation(c.config, OpCreate) return &RepoCommitCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -4907,6 +6247,21 @@ func (c *RepoCommitClient) CreateBulk(builders ...*RepoCommitCreate) *RepoCommit return &RepoCommitCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *RepoCommitClient) MapCreateBulk(slice any, setFunc func(*RepoCommitCreate, int)) *RepoCommitCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &RepoCommitCreateBulk{err: fmt.Errorf("calling to RepoCommitClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*RepoCommitCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &RepoCommitCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for RepoCommit. func (c *RepoCommitClient) Update() *RepoCommitUpdate { mutation := newRepoCommitMutation(c.config, OpUpdate) @@ -4931,12 +6286,12 @@ func (c *RepoCommitClient) Delete() *RepoCommitDelete { return &RepoCommitDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. 
+// DeleteOne returns a builder for deleting the given entity. func (c *RepoCommitClient) DeleteOne(rc *RepoCommit) *RepoCommitDeleteOne { return c.DeleteOneID(rc.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *RepoCommitClient) DeleteOneID(id uuid.UUID) *RepoCommitDeleteOne { builder := c.Delete().Where(repocommit.ID(id)) builder.mutation.id = &id @@ -4948,6 +6303,8 @@ func (c *RepoCommitClient) DeleteOneID(id uuid.UUID) *RepoCommitDeleteOne { func (c *RepoCommitClient) Query() *RepoCommitQuery { return &RepoCommitQuery{ config: c.config, + ctx: &QueryContext{Type: TypeRepoCommit}, + inters: c.Interceptors(), } } @@ -4967,8 +6324,8 @@ func (c *RepoCommitClient) GetX(ctx context.Context, id uuid.UUID) *RepoCommit { // QueryRepoCommitToRepository queries the RepoCommitToRepository edge of a RepoCommit. func (c *RepoCommitClient) QueryRepoCommitToRepository(rc *RepoCommit) *RepositoryQuery { - query := &RepositoryQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&RepositoryClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := rc.ID step := sqlgraph.NewStep( sqlgraph.From(repocommit.Table, repocommit.FieldID, id), @@ -4986,6 +6343,26 @@ func (c *RepoCommitClient) Hooks() []Hook { return c.hooks.RepoCommit } +// Interceptors returns the client interceptors. +func (c *RepoCommitClient) Interceptors() []Interceptor { + return c.inters.RepoCommit +} + +func (c *RepoCommitClient) mutate(ctx context.Context, m *RepoCommitMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&RepoCommitCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&RepoCommitUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&RepoCommitUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&RepoCommitDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown RepoCommit mutation op: %q", m.Op()) + } +} + // RepositoryClient is a client for the Repository schema. type RepositoryClient struct { config @@ -5002,7 +6379,13 @@ func (c *RepositoryClient) Use(hooks ...Hook) { c.hooks.Repository = append(c.hooks.Repository, hooks...) } -// Create returns a create builder for Repository. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `repository.Intercept(f(g(h())))`. +func (c *RepositoryClient) Intercept(interceptors ...Interceptor) { + c.inters.Repository = append(c.inters.Repository, interceptors...) +} + +// Create returns a builder for creating a Repository entity. func (c *RepositoryClient) Create() *RepositoryCreate { mutation := newRepositoryMutation(c.config, OpCreate) return &RepositoryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5013,6 +6396,21 @@ func (c *RepositoryClient) CreateBulk(builders ...*RepositoryCreate) *Repository return &RepositoryCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *RepositoryClient) MapCreateBulk(slice any, setFunc func(*RepositoryCreate, int)) *RepositoryCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &RepositoryCreateBulk{err: fmt.Errorf("calling to RepositoryClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*RepositoryCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &RepositoryCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Repository. func (c *RepositoryClient) Update() *RepositoryUpdate { mutation := newRepositoryMutation(c.config, OpUpdate) @@ -5037,12 +6435,12 @@ func (c *RepositoryClient) Delete() *RepositoryDelete { return &RepositoryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *RepositoryClient) DeleteOne(r *Repository) *RepositoryDeleteOne { return c.DeleteOneID(r.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *RepositoryClient) DeleteOneID(id uuid.UUID) *RepositoryDeleteOne { builder := c.Delete().Where(repository.ID(id)) builder.mutation.id = &id @@ -5054,6 +6452,8 @@ func (c *RepositoryClient) DeleteOneID(id uuid.UUID) *RepositoryDeleteOne { func (c *RepositoryClient) Query() *RepositoryQuery { return &RepositoryQuery{ config: c.config, + ctx: &QueryContext{Type: TypeRepository}, + inters: c.Interceptors(), } } @@ -5073,8 +6473,8 @@ func (c *RepositoryClient) GetX(ctx context.Context, id uuid.UUID) *Repository { // QueryRepositoryToEnvironment queries the RepositoryToEnvironment edge of a Repository. func (c *RepositoryClient) QueryRepositoryToEnvironment(r *Repository) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := r.ID step := sqlgraph.NewStep( sqlgraph.From(repository.Table, repository.FieldID, id), @@ -5089,8 +6489,8 @@ func (c *RepositoryClient) QueryRepositoryToEnvironment(r *Repository) *Environm // QueryRepositoryToRepoCommit queries the RepositoryToRepoCommit edge of a Repository. func (c *RepositoryClient) QueryRepositoryToRepoCommit(r *Repository) *RepoCommitQuery { - query := &RepoCommitQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&RepoCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := r.ID step := sqlgraph.NewStep( sqlgraph.From(repository.Table, repository.FieldID, id), @@ -5108,6 +6508,26 @@ func (c *RepositoryClient) Hooks() []Hook { return c.hooks.Repository } +// Interceptors returns the client interceptors. 
+func (c *RepositoryClient) Interceptors() []Interceptor { + return c.inters.Repository +} + +func (c *RepositoryClient) mutate(ctx context.Context, m *RepositoryMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&RepositoryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&RepositoryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&RepositoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&RepositoryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Repository mutation op: %q", m.Op()) + } +} + // ScriptClient is a client for the Script schema. type ScriptClient struct { config @@ -5124,7 +6544,13 @@ func (c *ScriptClient) Use(hooks ...Hook) { c.hooks.Script = append(c.hooks.Script, hooks...) } -// Create returns a create builder for Script. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `script.Intercept(f(g(h())))`. +func (c *ScriptClient) Intercept(interceptors ...Interceptor) { + c.inters.Script = append(c.inters.Script, interceptors...) +} + +// Create returns a builder for creating a Script entity. func (c *ScriptClient) Create() *ScriptCreate { mutation := newScriptMutation(c.config, OpCreate) return &ScriptCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5135,6 +6561,21 @@ func (c *ScriptClient) CreateBulk(builders ...*ScriptCreate) *ScriptCreateBulk { return &ScriptCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ScriptClient) MapCreateBulk(slice any, setFunc func(*ScriptCreate, int)) *ScriptCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ScriptCreateBulk{err: fmt.Errorf("calling to ScriptClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ScriptCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ScriptCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Script. func (c *ScriptClient) Update() *ScriptUpdate { mutation := newScriptMutation(c.config, OpUpdate) @@ -5159,12 +6600,12 @@ func (c *ScriptClient) Delete() *ScriptDelete { return &ScriptDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *ScriptClient) DeleteOne(s *Script) *ScriptDeleteOne { return c.DeleteOneID(s.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *ScriptClient) DeleteOneID(id uuid.UUID) *ScriptDeleteOne { builder := c.Delete().Where(script.ID(id)) builder.mutation.id = &id @@ -5176,6 +6617,8 @@ func (c *ScriptClient) DeleteOneID(id uuid.UUID) *ScriptDeleteOne { func (c *ScriptClient) Query() *ScriptQuery { return &ScriptQuery{ config: c.config, + ctx: &QueryContext{Type: TypeScript}, + inters: c.Interceptors(), } } @@ -5195,8 +6638,8 @@ func (c *ScriptClient) GetX(ctx context.Context, id uuid.UUID) *Script { // QueryScriptToUser queries the ScriptToUser edge of a Script. 
func (c *ScriptClient) QueryScriptToUser(s *Script) *UserQuery { - query := &UserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(script.Table, script.FieldID, id), @@ -5211,8 +6654,8 @@ func (c *ScriptClient) QueryScriptToUser(s *Script) *UserQuery { // QueryScriptToFinding queries the ScriptToFinding edge of a Script. func (c *ScriptClient) QueryScriptToFinding(s *Script) *FindingQuery { - query := &FindingQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&FindingClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(script.Table, script.FieldID, id), @@ -5227,8 +6670,8 @@ func (c *ScriptClient) QueryScriptToFinding(s *Script) *FindingQuery { // QueryScriptToEnvironment queries the ScriptToEnvironment edge of a Script. func (c *ScriptClient) QueryScriptToEnvironment(s *Script) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(script.Table, script.FieldID, id), @@ -5246,6 +6689,26 @@ func (c *ScriptClient) Hooks() []Hook { return c.hooks.Script } +// Interceptors returns the client interceptors. +func (c *ScriptClient) Interceptors() []Interceptor { + return c.inters.Script +} + +func (c *ScriptClient) mutate(ctx context.Context, m *ScriptMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ScriptCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ScriptUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ScriptUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ScriptDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Script mutation op: %q", m.Op()) + } +} + // ServerTaskClient is a client for the ServerTask schema. type ServerTaskClient struct { config @@ -5262,7 +6725,13 @@ func (c *ServerTaskClient) Use(hooks ...Hook) { c.hooks.ServerTask = append(c.hooks.ServerTask, hooks...) } -// Create returns a create builder for ServerTask. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `servertask.Intercept(f(g(h())))`. +func (c *ServerTaskClient) Intercept(interceptors ...Interceptor) { + c.inters.ServerTask = append(c.inters.ServerTask, interceptors...) +} + +// Create returns a builder for creating a ServerTask entity. func (c *ServerTaskClient) Create() *ServerTaskCreate { mutation := newServerTaskMutation(c.config, OpCreate) return &ServerTaskCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5273,6 +6742,21 @@ func (c *ServerTaskClient) CreateBulk(builders ...*ServerTaskCreate) *ServerTask return &ServerTaskCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ServerTaskClient) MapCreateBulk(slice any, setFunc func(*ServerTaskCreate, int)) *ServerTaskCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ServerTaskCreateBulk{err: fmt.Errorf("calling to ServerTaskClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ServerTaskCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ServerTaskCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ServerTask. func (c *ServerTaskClient) Update() *ServerTaskUpdate { mutation := newServerTaskMutation(c.config, OpUpdate) @@ -5297,12 +6781,12 @@ func (c *ServerTaskClient) Delete() *ServerTaskDelete { return &ServerTaskDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *ServerTaskClient) DeleteOne(st *ServerTask) *ServerTaskDeleteOne { return c.DeleteOneID(st.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *ServerTaskClient) DeleteOneID(id uuid.UUID) *ServerTaskDeleteOne { builder := c.Delete().Where(servertask.ID(id)) builder.mutation.id = &id @@ -5314,6 +6798,8 @@ func (c *ServerTaskClient) DeleteOneID(id uuid.UUID) *ServerTaskDeleteOne { func (c *ServerTaskClient) Query() *ServerTaskQuery { return &ServerTaskQuery{ config: c.config, + ctx: &QueryContext{Type: TypeServerTask}, + inters: c.Interceptors(), } } @@ -5333,8 +6819,8 @@ func (c *ServerTaskClient) GetX(ctx context.Context, id uuid.UUID) *ServerTask { // QueryServerTaskToAuthUser queries the ServerTaskToAuthUser edge of a ServerTask. func (c *ServerTaskClient) QueryServerTaskToAuthUser(st *ServerTask) *AuthUserQuery { - query := &AuthUserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AuthUserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := st.ID step := sqlgraph.NewStep( sqlgraph.From(servertask.Table, servertask.FieldID, id), @@ -5349,8 +6835,8 @@ func (c *ServerTaskClient) QueryServerTaskToAuthUser(st *ServerTask) *AuthUserQu // QueryServerTaskToStatus queries the ServerTaskToStatus edge of a ServerTask. func (c *ServerTaskClient) QueryServerTaskToStatus(st *ServerTask) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := st.ID step := sqlgraph.NewStep( sqlgraph.From(servertask.Table, servertask.FieldID, id), @@ -5365,8 +6851,8 @@ func (c *ServerTaskClient) QueryServerTaskToStatus(st *ServerTask) *StatusQuery // QueryServerTaskToEnvironment queries the ServerTaskToEnvironment edge of a ServerTask. 
func (c *ServerTaskClient) QueryServerTaskToEnvironment(st *ServerTask) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := st.ID step := sqlgraph.NewStep( sqlgraph.From(servertask.Table, servertask.FieldID, id), @@ -5381,8 +6867,8 @@ func (c *ServerTaskClient) QueryServerTaskToEnvironment(st *ServerTask) *Environ // QueryServerTaskToBuild queries the ServerTaskToBuild edge of a ServerTask. func (c *ServerTaskClient) QueryServerTaskToBuild(st *ServerTask) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := st.ID step := sqlgraph.NewStep( sqlgraph.From(servertask.Table, servertask.FieldID, id), @@ -5397,8 +6883,8 @@ func (c *ServerTaskClient) QueryServerTaskToBuild(st *ServerTask) *BuildQuery { // QueryServerTaskToBuildCommit queries the ServerTaskToBuildCommit edge of a ServerTask. func (c *ServerTaskClient) QueryServerTaskToBuildCommit(st *ServerTask) *BuildCommitQuery { - query := &BuildCommitQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := st.ID step := sqlgraph.NewStep( sqlgraph.From(servertask.Table, servertask.FieldID, id), @@ -5413,8 +6899,8 @@ func (c *ServerTaskClient) QueryServerTaskToBuildCommit(st *ServerTask) *BuildCo // QueryServerTaskToGinFileMiddleware queries the ServerTaskToGinFileMiddleware edge of a ServerTask. func (c *ServerTaskClient) QueryServerTaskToGinFileMiddleware(st *ServerTask) *GinFileMiddlewareQuery { - query := &GinFileMiddlewareQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&GinFileMiddlewareClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := st.ID step := sqlgraph.NewStep( sqlgraph.From(servertask.Table, servertask.FieldID, id), @@ -5432,6 +6918,26 @@ func (c *ServerTaskClient) Hooks() []Hook { return c.hooks.ServerTask } +// Interceptors returns the client interceptors. +func (c *ServerTaskClient) Interceptors() []Interceptor { + return c.inters.ServerTask +} + +func (c *ServerTaskClient) mutate(ctx context.Context, m *ServerTaskMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ServerTaskCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ServerTaskUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ServerTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ServerTaskDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ServerTask mutation op: %q", m.Op()) + } +} + // StatusClient is a client for the Status schema. type StatusClient struct { config @@ -5448,7 +6954,13 @@ func (c *StatusClient) Use(hooks ...Hook) { c.hooks.Status = append(c.hooks.Status, hooks...) } -// Create returns a create builder for Status. +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `status.Intercept(f(g(h())))`. +func (c *StatusClient) Intercept(interceptors ...Interceptor) { + c.inters.Status = append(c.inters.Status, interceptors...) +} + +// Create returns a builder for creating a Status entity. func (c *StatusClient) Create() *StatusCreate { mutation := newStatusMutation(c.config, OpCreate) return &StatusCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5459,6 +6971,21 @@ func (c *StatusClient) CreateBulk(builders ...*StatusCreate) *StatusCreateBulk { return &StatusCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *StatusClient) MapCreateBulk(slice any, setFunc func(*StatusCreate, int)) *StatusCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &StatusCreateBulk{err: fmt.Errorf("calling to StatusClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*StatusCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &StatusCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Status. func (c *StatusClient) Update() *StatusUpdate { mutation := newStatusMutation(c.config, OpUpdate) @@ -5483,12 +7010,12 @@ func (c *StatusClient) Delete() *StatusDelete { return &StatusDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *StatusClient) DeleteOne(s *Status) *StatusDeleteOne { return c.DeleteOneID(s.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *StatusClient) DeleteOneID(id uuid.UUID) *StatusDeleteOne { builder := c.Delete().Where(status.ID(id)) builder.mutation.id = &id @@ -5500,6 +7027,8 @@ func (c *StatusClient) DeleteOneID(id uuid.UUID) *StatusDeleteOne { func (c *StatusClient) Query() *StatusQuery { return &StatusQuery{ config: c.config, + ctx: &QueryContext{Type: TypeStatus}, + inters: c.Interceptors(), } } @@ -5519,8 +7048,8 @@ func (c *StatusClient) GetX(ctx context.Context, id uuid.UUID) *Status { // QueryStatusToBuild queries the StatusToBuild edge of a Status. func (c *StatusClient) QueryStatusToBuild(s *Status) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5535,8 +7064,8 @@ func (c *StatusClient) QueryStatusToBuild(s *Status) *BuildQuery { // QueryStatusToProvisionedNetwork queries the StatusToProvisionedNetwork edge of a Status. 
func (c *StatusClient) QueryStatusToProvisionedNetwork(s *Status) *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5551,8 +7080,8 @@ func (c *StatusClient) QueryStatusToProvisionedNetwork(s *Status) *ProvisionedNe // QueryStatusToProvisionedHost queries the StatusToProvisionedHost edge of a Status. func (c *StatusClient) QueryStatusToProvisionedHost(s *Status) *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedHostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5567,8 +7096,8 @@ func (c *StatusClient) QueryStatusToProvisionedHost(s *Status) *ProvisionedHostQ // QueryStatusToProvisioningStep queries the StatusToProvisioningStep edge of a Status. func (c *StatusClient) QueryStatusToProvisioningStep(s *Status) *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisioningStepClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5583,8 +7112,8 @@ func (c *StatusClient) QueryStatusToProvisioningStep(s *Status) *ProvisioningSte // QueryStatusToTeam queries the StatusToTeam edge of a Status. func (c *StatusClient) QueryStatusToTeam(s *Status) *TeamQuery { - query := &TeamQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TeamClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5599,8 +7128,8 @@ func (c *StatusClient) QueryStatusToTeam(s *Status) *TeamQuery { // QueryStatusToPlan queries the StatusToPlan edge of a Status. func (c *StatusClient) QueryStatusToPlan(s *Status) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5615,8 +7144,8 @@ func (c *StatusClient) QueryStatusToPlan(s *Status) *PlanQuery { // QueryStatusToServerTask queries the StatusToServerTask edge of a Status. func (c *StatusClient) QueryStatusToServerTask(s *Status) *ServerTaskQuery { - query := &ServerTaskQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ServerTaskClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5631,8 +7160,8 @@ func (c *StatusClient) QueryStatusToServerTask(s *Status) *ServerTaskQuery { // QueryStatusToAdhocPlan queries the StatusToAdhocPlan edge of a Status. 
func (c *StatusClient) QueryStatusToAdhocPlan(s *Status) *AdhocPlanQuery { - query := &AdhocPlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AdhocPlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := s.ID step := sqlgraph.NewStep( sqlgraph.From(status.Table, status.FieldID, id), @@ -5650,6 +7179,26 @@ func (c *StatusClient) Hooks() []Hook { return c.hooks.Status } +// Interceptors returns the client interceptors. +func (c *StatusClient) Interceptors() []Interceptor { + return c.inters.Status +} + +func (c *StatusClient) mutate(ctx context.Context, m *StatusMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&StatusCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&StatusUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&StatusUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&StatusDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Status mutation op: %q", m.Op()) + } +} + // TagClient is a client for the Tag schema. type TagClient struct { config @@ -5666,7 +7215,13 @@ func (c *TagClient) Use(hooks ...Hook) { c.hooks.Tag = append(c.hooks.Tag, hooks...) } -// Create returns a create builder for Tag. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `tag.Intercept(f(g(h())))`. +func (c *TagClient) Intercept(interceptors ...Interceptor) { + c.inters.Tag = append(c.inters.Tag, interceptors...) +} + +// Create returns a builder for creating a Tag entity. func (c *TagClient) Create() *TagCreate { mutation := newTagMutation(c.config, OpCreate) return &TagCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5677,6 +7232,21 @@ func (c *TagClient) CreateBulk(builders ...*TagCreate) *TagCreateBulk { return &TagCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *TagClient) MapCreateBulk(slice any, setFunc func(*TagCreate, int)) *TagCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &TagCreateBulk{err: fmt.Errorf("calling to TagClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*TagCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &TagCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Tag. func (c *TagClient) Update() *TagUpdate { mutation := newTagMutation(c.config, OpUpdate) @@ -5701,12 +7271,12 @@ func (c *TagClient) Delete() *TagDelete { return &TagDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *TagClient) DeleteOne(t *Tag) *TagDeleteOne { return c.DeleteOneID(t.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. 
func (c *TagClient) DeleteOneID(id uuid.UUID) *TagDeleteOne { builder := c.Delete().Where(tag.ID(id)) builder.mutation.id = &id @@ -5718,6 +7288,8 @@ func (c *TagClient) DeleteOneID(id uuid.UUID) *TagDeleteOne { func (c *TagClient) Query() *TagQuery { return &TagQuery{ config: c.config, + ctx: &QueryContext{Type: TypeTag}, + inters: c.Interceptors(), } } @@ -5740,6 +7312,26 @@ func (c *TagClient) Hooks() []Hook { return c.hooks.Tag } +// Interceptors returns the client interceptors. +func (c *TagClient) Interceptors() []Interceptor { + return c.inters.Tag +} + +func (c *TagClient) mutate(ctx context.Context, m *TagMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&TagCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&TagUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&TagUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&TagDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Tag mutation op: %q", m.Op()) + } +} + // TeamClient is a client for the Team schema. type TeamClient struct { config @@ -5756,7 +7348,13 @@ func (c *TeamClient) Use(hooks ...Hook) { c.hooks.Team = append(c.hooks.Team, hooks...) } -// Create returns a create builder for Team. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `team.Intercept(f(g(h())))`. +func (c *TeamClient) Intercept(interceptors ...Interceptor) { + c.inters.Team = append(c.inters.Team, interceptors...) +} + +// Create returns a builder for creating a Team entity. func (c *TeamClient) Create() *TeamCreate { mutation := newTeamMutation(c.config, OpCreate) return &TeamCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5767,6 +7365,21 @@ func (c *TeamClient) CreateBulk(builders ...*TeamCreate) *TeamCreateBulk { return &TeamCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *TeamClient) MapCreateBulk(slice any, setFunc func(*TeamCreate, int)) *TeamCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &TeamCreateBulk{err: fmt.Errorf("calling to TeamClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*TeamCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &TeamCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Team. func (c *TeamClient) Update() *TeamUpdate { mutation := newTeamMutation(c.config, OpUpdate) @@ -5791,12 +7404,12 @@ func (c *TeamClient) Delete() *TeamDelete { return &TeamDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *TeamClient) DeleteOne(t *Team) *TeamDeleteOne { return c.DeleteOneID(t.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. 
func (c *TeamClient) DeleteOneID(id uuid.UUID) *TeamDeleteOne { builder := c.Delete().Where(team.ID(id)) builder.mutation.id = &id @@ -5808,6 +7421,8 @@ func (c *TeamClient) DeleteOneID(id uuid.UUID) *TeamDeleteOne { func (c *TeamClient) Query() *TeamQuery { return &TeamQuery{ config: c.config, + ctx: &QueryContext{Type: TypeTeam}, + inters: c.Interceptors(), } } @@ -5827,8 +7442,8 @@ func (c *TeamClient) GetX(ctx context.Context, id uuid.UUID) *Team { // QueryTeamToBuild queries the TeamToBuild edge of a Team. func (c *TeamClient) QueryTeamToBuild(t *Team) *BuildQuery { - query := &BuildQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&BuildClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := t.ID step := sqlgraph.NewStep( sqlgraph.From(team.Table, team.FieldID, id), @@ -5843,8 +7458,8 @@ func (c *TeamClient) QueryTeamToBuild(t *Team) *BuildQuery { // QueryTeamToStatus queries the TeamToStatus edge of a Team. func (c *TeamClient) QueryTeamToStatus(t *Team) *StatusQuery { - query := &StatusQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&StatusClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := t.ID step := sqlgraph.NewStep( sqlgraph.From(team.Table, team.FieldID, id), @@ -5859,8 +7474,8 @@ func (c *TeamClient) QueryTeamToStatus(t *Team) *StatusQuery { // QueryTeamToProvisionedNetwork queries the TeamToProvisionedNetwork edge of a Team. func (c *TeamClient) QueryTeamToProvisionedNetwork(t *Team) *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&ProvisionedNetworkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := t.ID step := sqlgraph.NewStep( sqlgraph.From(team.Table, team.FieldID, id), @@ -5875,8 +7490,8 @@ func (c *TeamClient) QueryTeamToProvisionedNetwork(t *Team) *ProvisionedNetworkQ // QueryTeamToPlan queries the TeamToPlan edge of a Team. func (c *TeamClient) QueryTeamToPlan(t *Team) *PlanQuery { - query := &PlanQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&PlanClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := t.ID step := sqlgraph.NewStep( sqlgraph.From(team.Table, team.FieldID, id), @@ -5894,6 +7509,26 @@ func (c *TeamClient) Hooks() []Hook { return c.hooks.Team } +// Interceptors returns the client interceptors. +func (c *TeamClient) Interceptors() []Interceptor { + return c.inters.Team +} + +func (c *TeamClient) mutate(ctx context.Context, m *TeamMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&TeamCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&TeamUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&TeamUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&TeamDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Team mutation op: %q", m.Op()) + } +} + // TokenClient is a client for the Token schema. 
type TokenClient struct { config @@ -5910,7 +7545,13 @@ func (c *TokenClient) Use(hooks ...Hook) { c.hooks.Token = append(c.hooks.Token, hooks...) } -// Create returns a create builder for Token. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `token.Intercept(f(g(h())))`. +func (c *TokenClient) Intercept(interceptors ...Interceptor) { + c.inters.Token = append(c.inters.Token, interceptors...) +} + +// Create returns a builder for creating a Token entity. func (c *TokenClient) Create() *TokenCreate { mutation := newTokenMutation(c.config, OpCreate) return &TokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -5921,6 +7562,21 @@ func (c *TokenClient) CreateBulk(builders ...*TokenCreate) *TokenCreateBulk { return &TokenCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *TokenClient) MapCreateBulk(slice any, setFunc func(*TokenCreate, int)) *TokenCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &TokenCreateBulk{err: fmt.Errorf("calling to TokenClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*TokenCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &TokenCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Token. func (c *TokenClient) Update() *TokenUpdate { mutation := newTokenMutation(c.config, OpUpdate) @@ -5945,12 +7601,12 @@ func (c *TokenClient) Delete() *TokenDelete { return &TokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *TokenClient) DeleteOne(t *Token) *TokenDeleteOne { return c.DeleteOneID(t.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *TokenClient) DeleteOneID(id uuid.UUID) *TokenDeleteOne { builder := c.Delete().Where(token.ID(id)) builder.mutation.id = &id @@ -5962,6 +7618,8 @@ func (c *TokenClient) DeleteOneID(id uuid.UUID) *TokenDeleteOne { func (c *TokenClient) Query() *TokenQuery { return &TokenQuery{ config: c.config, + ctx: &QueryContext{Type: TypeToken}, + inters: c.Interceptors(), } } @@ -5981,8 +7639,8 @@ func (c *TokenClient) GetX(ctx context.Context, id uuid.UUID) *Token { // QueryTokenToAuthUser queries the TokenToAuthUser edge of a Token. func (c *TokenClient) QueryTokenToAuthUser(t *Token) *AuthUserQuery { - query := &AuthUserQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AuthUserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := t.ID step := sqlgraph.NewStep( sqlgraph.From(token.Table, token.FieldID, id), @@ -6000,6 +7658,26 @@ func (c *TokenClient) Hooks() []Hook { return c.hooks.Token } +// Interceptors returns the client interceptors. 
+func (c *TokenClient) Interceptors() []Interceptor { + return c.inters.Token +} + +func (c *TokenClient) mutate(ctx context.Context, m *TokenMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&TokenCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&TokenUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&TokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&TokenDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Token mutation op: %q", m.Op()) + } +} + // UserClient is a client for the User schema. type UserClient struct { config @@ -6016,7 +7694,13 @@ func (c *UserClient) Use(hooks ...Hook) { c.hooks.User = append(c.hooks.User, hooks...) } -// Create returns a create builder for User. +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`. +func (c *UserClient) Intercept(interceptors ...Interceptor) { + c.inters.User = append(c.inters.User, interceptors...) +} + +// Create returns a builder for creating a User entity. func (c *UserClient) Create() *UserCreate { mutation := newUserMutation(c.config, OpCreate) return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} @@ -6027,6 +7711,21 @@ func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { return &UserCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for User. func (c *UserClient) Update() *UserUpdate { mutation := newUserMutation(c.config, OpUpdate) @@ -6051,12 +7750,12 @@ func (c *UserClient) Delete() *UserDelete { return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// DeleteOne returns a delete builder for the given entity. +// DeleteOne returns a builder for deleting the given entity. func (c *UserClient) DeleteOne(u *User) *UserDeleteOne { return c.DeleteOneID(u.ID) } -// DeleteOneID returns a delete builder for the given id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *UserClient) DeleteOneID(id uuid.UUID) *UserDeleteOne { builder := c.Delete().Where(user.ID(id)) builder.mutation.id = &id @@ -6068,6 +7767,8 @@ func (c *UserClient) DeleteOneID(id uuid.UUID) *UserDeleteOne { func (c *UserClient) Query() *UserQuery { return &UserQuery{ config: c.config, + ctx: &QueryContext{Type: TypeUser}, + inters: c.Interceptors(), } } @@ -6087,8 +7788,8 @@ func (c *UserClient) GetX(ctx context.Context, id uuid.UUID) *User { // QueryUserToTag queries the UserToTag edge of a User. 
func (c *UserClient) QueryUserToTag(u *User) *TagQuery { - query := &TagQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&TagClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := u.ID step := sqlgraph.NewStep( sqlgraph.From(user.Table, user.FieldID, id), @@ -6103,8 +7804,8 @@ func (c *UserClient) QueryUserToTag(u *User) *TagQuery { // QueryUserToEnvironment queries the UserToEnvironment edge of a User. func (c *UserClient) QueryUserToEnvironment(u *User) *EnvironmentQuery { - query := &EnvironmentQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EnvironmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := u.ID step := sqlgraph.NewStep( sqlgraph.From(user.Table, user.FieldID, id), @@ -6121,3 +7822,43 @@ func (c *UserClient) QueryUserToEnvironment(u *User) *EnvironmentQuery { func (c *UserClient) Hooks() []Hook { return c.hooks.User } + +// Interceptors returns the client interceptors. +func (c *UserClient) Interceptors() []Interceptor { + return c.inters.User +} + +func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + AdhocPlan, AgentStatus, AgentTask, Ansible, AuthUser, Build, BuildCommit, + Command, Competition, DNS, DNSRecord, Disk, Environment, FileDelete, + FileDownload, FileExtract, Finding, GinFileMiddleware, Host, HostDependency, + Identity, IncludedNetwork, Network, Plan, PlanDiff, ProvisionedHost, + ProvisionedNetwork, ProvisioningStep, RepoCommit, Repository, Script, + ServerTask, Status, Tag, Team, Token, User []ent.Hook + } + inters struct { + AdhocPlan, AgentStatus, AgentTask, Ansible, AuthUser, Build, BuildCommit, + Command, Competition, DNS, DNSRecord, Disk, Environment, FileDelete, + FileDownload, FileExtract, Finding, GinFileMiddleware, Host, HostDependency, + Identity, IncludedNetwork, Network, Plan, PlanDiff, ProvisionedHost, + ProvisionedNetwork, ProvisioningStep, RepoCommit, Repository, Script, + ServerTask, Status, Tag, Team, Token, User []ent.Interceptor + } +) diff --git a/ent/command.go b/ent/command.go index 6d4d4c43..d5fad417 100755 --- a/ent/command.go +++ b/ent/command.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/command" "github.com/gen0cide/laforge/ent/environment" @@ -18,8 +19,8 @@ type Command struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. 
+ HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Name holds the value of the "name" field. Name string `json:"name,omitempty" hcl:"name,attr"` // Description holds the value of the "description" field. @@ -44,13 +45,15 @@ type Command struct { // The values are being populated by the CommandQuery when eager-loading is set. Edges CommandEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // CommandToUser holds the value of the CommandToUser edge. HCLCommandToUser []*User `json:"CommandToUser,omitempty" hcl:"maintainer,block"` // CommandToEnvironment holds the value of the CommandToEnvironment edge. HCLCommandToEnvironment *Environment `json:"CommandToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_command *uuid.UUID + selectValues sql.SelectValues } // CommandEdges holds the relations/edges for other nodes in the graph. @@ -62,6 +65,10 @@ type CommandEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int + + namedCommandToUser map[string][]*User } // CommandToUserOrErr returns the CommandToUser value or an error if the edge @@ -78,8 +85,7 @@ func (e CommandEdges) CommandToUserOrErr() ([]*User, error) { func (e CommandEdges) CommandToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[1] { if e.CommandToEnvironment == nil { - // The edge CommandToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.CommandToEnvironment, nil @@ -88,8 +94,8 @@ func (e CommandEdges) CommandToEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Command) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Command) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case command.FieldArgs, command.FieldVars, command.FieldTags: @@ -98,14 +104,14 @@ func (*Command) scanValues(columns []string) ([]interface{}, error) { values[i] = new(sql.NullBool) case command.FieldCooldown, command.FieldTimeout: values[i] = new(sql.NullInt64) - case command.FieldHclID, command.FieldName, command.FieldDescription, command.FieldProgram: + case command.FieldHCLID, command.FieldName, command.FieldDescription, command.FieldProgram: values[i] = new(sql.NullString) case command.FieldID: values[i] = new(uuid.UUID) case command.ForeignKeys[0]: // environment_environment_to_command values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Command", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -113,7 +119,7 @@ func (*Command) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Command fields. 
-func (c *Command) assignValues(columns []string, values []interface{}) error { +func (c *Command) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -125,11 +131,11 @@ func (c *Command) assignValues(columns []string, values []interface{}) error { } else if value != nil { c.ID = *value } - case command.FieldHclID: + case command.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - c.HclID = value.String + c.HCLID = value.String } case command.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -204,36 +210,44 @@ func (c *Command) assignValues(columns []string, values []interface{}) error { c.environment_environment_to_command = new(uuid.UUID) *c.environment_environment_to_command = *value.S.(*uuid.UUID) } + default: + c.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Command. +// This includes values selected through modifiers, order, etc. +func (c *Command) Value(name string) (ent.Value, error) { + return c.selectValues.Get(name) +} + // QueryCommandToUser queries the "CommandToUser" edge of the Command entity. func (c *Command) QueryCommandToUser() *UserQuery { - return (&CommandClient{config: c.config}).QueryCommandToUser(c) + return NewCommandClient(c.config).QueryCommandToUser(c) } // QueryCommandToEnvironment queries the "CommandToEnvironment" edge of the Command entity. func (c *Command) QueryCommandToEnvironment() *EnvironmentQuery { - return (&CommandClient{config: c.config}).QueryCommandToEnvironment(c) + return NewCommandClient(c.config).QueryCommandToEnvironment(c) } // Update returns a builder for updating this Command. // Note that you need to call Command.Unwrap() before calling this method if this Command // was returned from a transaction, and the transaction was committed or rolled back. func (c *Command) Update() *CommandUpdateOne { - return (&CommandClient{config: c.config}).UpdateOne(c) + return NewCommandClient(c.config).UpdateOne(c) } // Unwrap unwraps the Command entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (c *Command) Unwrap() *Command { - tx, ok := c.config.driver.(*txDriver) + _tx, ok := c.config.driver.(*txDriver) if !ok { panic("ent: Command is not a transactional entity") } - c.config.driver = tx.drv + c.config.driver = _tx.drv return c } @@ -241,38 +255,66 @@ func (c *Command) Unwrap() *Command { func (c *Command) String() string { var builder strings.Builder builder.WriteString("Command(") - builder.WriteString(fmt.Sprintf("id=%v", c.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(c.HclID) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", c.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(c.HCLID) + builder.WriteString(", ") + builder.WriteString("name=") builder.WriteString(c.Name) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(c.Description) - builder.WriteString(", program=") + builder.WriteString(", ") + builder.WriteString("program=") builder.WriteString(c.Program) - builder.WriteString(", args=") + builder.WriteString(", ") + builder.WriteString("args=") builder.WriteString(fmt.Sprintf("%v", c.Args)) - builder.WriteString(", ignore_errors=") + builder.WriteString(", ") + builder.WriteString("ignore_errors=") builder.WriteString(fmt.Sprintf("%v", c.IgnoreErrors)) - builder.WriteString(", disabled=") + builder.WriteString(", ") + builder.WriteString("disabled=") builder.WriteString(fmt.Sprintf("%v", c.Disabled)) - builder.WriteString(", cooldown=") + builder.WriteString(", ") + builder.WriteString("cooldown=") builder.WriteString(fmt.Sprintf("%v", c.Cooldown)) - builder.WriteString(", timeout=") + builder.WriteString(", ") + builder.WriteString("timeout=") builder.WriteString(fmt.Sprintf("%v", c.Timeout)) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", c.Vars)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", c.Tags)) builder.WriteByte(')') return builder.String() } -// Commands is a parsable slice of Command. -type Commands []*Command +// NamedCommandToUser returns the CommandToUser named value or an error if the edge was not +// loaded in eager-loading with this name. +func (c *Command) NamedCommandToUser(name string) ([]*User, error) { + if c.Edges.namedCommandToUser == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := c.Edges.namedCommandToUser[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (c Commands) config(cfg config) { - for _i := range c { - c[_i].config = cfg +func (c *Command) appendNamedCommandToUser(name string, edges ...*User) { + if c.Edges.namedCommandToUser == nil { + c.Edges.namedCommandToUser = make(map[string][]*User) + } + if len(edges) == 0 { + c.Edges.namedCommandToUser[name] = []*User{} + } else { + c.Edges.namedCommandToUser[name] = append(c.Edges.namedCommandToUser[name], edges...) } } + +// Commands is a parsable slice of Command. +type Commands []*Command diff --git a/ent/command/command.go b/ent/command/command.go index 681b50b1..6c4b9fb0 100755 --- a/ent/command/command.go +++ b/ent/command/command.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package command import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "command" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldName holds the string denoting the name field in the database. FieldName = "name" // FieldDescription holds the string denoting the description field in the database. @@ -58,7 +60,7 @@ const ( // Columns holds all SQL columns for command fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldName, FieldDescription, FieldProgram, @@ -100,3 +102,86 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Command queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByProgram orders the results by the program field. +func ByProgram(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProgram, opts...).ToFunc() +} + +// ByIgnoreErrors orders the results by the ignore_errors field. +func ByIgnoreErrors(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIgnoreErrors, opts...).ToFunc() +} + +// ByDisabled orders the results by the disabled field. +func ByDisabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisabled, opts...).ToFunc() +} + +// ByCooldown orders the results by the cooldown field. +func ByCooldown(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCooldown, opts...).ToFunc() +} + +// ByTimeout orders the results by the timeout field. +func ByTimeout(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTimeout, opts...).ToFunc() +} + +// ByCommandToUserCount orders the results by CommandToUser count. +func ByCommandToUserCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newCommandToUserStep(), opts...) + } +} + +// ByCommandToUser orders the results by CommandToUser terms. +func ByCommandToUser(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCommandToUserStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByCommandToEnvironmentField orders the results by CommandToEnvironment field. 
+func ByCommandToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCommandToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newCommandToUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CommandToUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, CommandToUserTable, CommandToUserColumn), + ) +} +func newCommandToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CommandToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CommandToEnvironmentTable, CommandToEnvironmentColumn), + ) +} diff --git a/ent/command/where.go b/ent/command/where.go index eef75f36..4b0bf8b1 100755 --- a/ent/command/where.go +++ b/ent/command/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package command @@ -11,765 +11,447 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Command(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Command(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. 
func IDLT(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Command(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Command { + return predicate.Command(sql.FieldEQ(FieldHCLID, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldEQ(FieldName, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. func Description(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldEQ(FieldDescription, v)) } // Program applies equality check predicate on the "program" field. It's identical to ProgramEQ. func Program(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldEQ(FieldProgram, v)) } // IgnoreErrors applies equality check predicate on the "ignore_errors" field. It's identical to IgnoreErrorsEQ. func IgnoreErrors(v bool) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIgnoreErrors), v)) - }) + return predicate.Command(sql.FieldEQ(FieldIgnoreErrors, v)) } // Disabled applies equality check predicate on the "disabled" field. It's identical to DisabledEQ. func Disabled(v bool) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.Command(sql.FieldEQ(FieldDisabled, v)) } // Cooldown applies equality check predicate on the "cooldown" field. It's identical to CooldownEQ. func Cooldown(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldEQ(FieldCooldown, v)) } // Timeout applies equality check predicate on the "timeout" field. It's identical to TimeoutEQ. func Timeout(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldEQ(FieldTimeout, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Command { + return predicate.Command(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. 
-func HclIDNEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Command { + return predicate.Command(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Command { + return predicate.Command(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Command { + return predicate.Command(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Command { + return predicate.Command(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Command { + return predicate.Command(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Command { + return predicate.Command(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Command { + return predicate.Command(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Command { + return predicate.Command(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. 
-func HclIDHasPrefix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Command { + return predicate.Command(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Command { + return predicate.Command(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Command { + return predicate.Command(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Command { + return predicate.Command(sql.FieldContainsFold(FieldHCLID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Command(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Command(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. 
func NameGTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Command(sql.FieldContainsFold(FieldName, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. func DescriptionEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. func DescriptionIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Command(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. 
func DescriptionNotIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Command(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. func DescriptionHasSuffix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. func DescriptionEqualFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. func DescriptionContainsFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Command(sql.FieldContainsFold(FieldDescription, v)) } // ProgramEQ applies the EQ predicate on the "program" field. 
func ProgramEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldEQ(FieldProgram, v)) } // ProgramNEQ applies the NEQ predicate on the "program" field. func ProgramNEQ(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldProgram, v)) } // ProgramIn applies the In predicate on the "program" field. func ProgramIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldProgram), v...)) - }) + return predicate.Command(sql.FieldIn(FieldProgram, vs...)) } // ProgramNotIn applies the NotIn predicate on the "program" field. func ProgramNotIn(vs ...string) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldProgram), v...)) - }) + return predicate.Command(sql.FieldNotIn(FieldProgram, vs...)) } // ProgramGT applies the GT predicate on the "program" field. func ProgramGT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldGT(FieldProgram, v)) } // ProgramGTE applies the GTE predicate on the "program" field. func ProgramGTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldGTE(FieldProgram, v)) } // ProgramLT applies the LT predicate on the "program" field. func ProgramLT(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldLT(FieldProgram, v)) } // ProgramLTE applies the LTE predicate on the "program" field. func ProgramLTE(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldLTE(FieldProgram, v)) } // ProgramContains applies the Contains predicate on the "program" field. func ProgramContains(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldContains(FieldProgram, v)) } // ProgramHasPrefix applies the HasPrefix predicate on the "program" field. func ProgramHasPrefix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldHasPrefix(FieldProgram, v)) } // ProgramHasSuffix applies the HasSuffix predicate on the "program" field. 
func ProgramHasSuffix(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldHasSuffix(FieldProgram, v)) } // ProgramEqualFold applies the EqualFold predicate on the "program" field. func ProgramEqualFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldEqualFold(FieldProgram, v)) } // ProgramContainsFold applies the ContainsFold predicate on the "program" field. func ProgramContainsFold(v string) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldProgram), v)) - }) + return predicate.Command(sql.FieldContainsFold(FieldProgram, v)) } // IgnoreErrorsEQ applies the EQ predicate on the "ignore_errors" field. func IgnoreErrorsEQ(v bool) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIgnoreErrors), v)) - }) + return predicate.Command(sql.FieldEQ(FieldIgnoreErrors, v)) } // IgnoreErrorsNEQ applies the NEQ predicate on the "ignore_errors" field. func IgnoreErrorsNEQ(v bool) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIgnoreErrors), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldIgnoreErrors, v)) } // DisabledEQ applies the EQ predicate on the "disabled" field. func DisabledEQ(v bool) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.Command(sql.FieldEQ(FieldDisabled, v)) } // DisabledNEQ applies the NEQ predicate on the "disabled" field. func DisabledNEQ(v bool) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDisabled), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldDisabled, v)) } // CooldownEQ applies the EQ predicate on the "cooldown" field. func CooldownEQ(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldEQ(FieldCooldown, v)) } // CooldownNEQ applies the NEQ predicate on the "cooldown" field. func CooldownNEQ(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldCooldown, v)) } // CooldownIn applies the In predicate on the "cooldown" field. func CooldownIn(vs ...int) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCooldown), v...)) - }) + return predicate.Command(sql.FieldIn(FieldCooldown, vs...)) } // CooldownNotIn applies the NotIn predicate on the "cooldown" field. func CooldownNotIn(vs ...int) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCooldown), v...)) - }) + return predicate.Command(sql.FieldNotIn(FieldCooldown, vs...)) } // CooldownGT applies the GT predicate on the "cooldown" field. func CooldownGT(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldGT(FieldCooldown, v)) } // CooldownGTE applies the GTE predicate on the "cooldown" field. func CooldownGTE(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldGTE(FieldCooldown, v)) } // CooldownLT applies the LT predicate on the "cooldown" field. func CooldownLT(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldLT(FieldCooldown, v)) } // CooldownLTE applies the LTE predicate on the "cooldown" field. func CooldownLTE(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCooldown), v)) - }) + return predicate.Command(sql.FieldLTE(FieldCooldown, v)) } // TimeoutEQ applies the EQ predicate on the "timeout" field. func TimeoutEQ(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldEQ(FieldTimeout, v)) } // TimeoutNEQ applies the NEQ predicate on the "timeout" field. func TimeoutNEQ(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldNEQ(FieldTimeout, v)) } // TimeoutIn applies the In predicate on the "timeout" field. func TimeoutIn(vs ...int) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTimeout), v...)) - }) + return predicate.Command(sql.FieldIn(FieldTimeout, vs...)) } // TimeoutNotIn applies the NotIn predicate on the "timeout" field. func TimeoutNotIn(vs ...int) predicate.Command { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Command(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTimeout), v...)) - }) + return predicate.Command(sql.FieldNotIn(FieldTimeout, vs...)) } // TimeoutGT applies the GT predicate on the "timeout" field. func TimeoutGT(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldGT(FieldTimeout, v)) } // TimeoutGTE applies the GTE predicate on the "timeout" field. func TimeoutGTE(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldGTE(FieldTimeout, v)) } // TimeoutLT applies the LT predicate on the "timeout" field. 
func TimeoutLT(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldLT(FieldTimeout, v)) } // TimeoutLTE applies the LTE predicate on the "timeout" field. func TimeoutLTE(v int) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTimeout), v)) - }) + return predicate.Command(sql.FieldLTE(FieldTimeout, v)) } // HasCommandToUser applies the HasEdge predicate on the "CommandToUser" edge. @@ -777,7 +459,6 @@ func HasCommandToUser() predicate.Command { return predicate.Command(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(CommandToUserTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, CommandToUserTable, CommandToUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -787,11 +468,7 @@ func HasCommandToUser() predicate.Command { // HasCommandToUserWith applies the HasEdge predicate on the "CommandToUser" edge with a given conditions (other predicates). func HasCommandToUserWith(preds ...predicate.User) predicate.Command { return predicate.Command(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(CommandToUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, CommandToUserTable, CommandToUserColumn), - ) + step := newCommandToUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -805,7 +482,6 @@ func HasCommandToEnvironment() predicate.Command { return predicate.Command(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(CommandToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, CommandToEnvironmentTable, CommandToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -815,11 +491,7 @@ func HasCommandToEnvironment() predicate.Command { // HasCommandToEnvironmentWith applies the HasEdge predicate on the "CommandToEnvironment" edge with a given conditions (other predicates). func HasCommandToEnvironmentWith(preds ...predicate.Environment) predicate.Command { return predicate.Command(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(CommandToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, CommandToEnvironmentTable, CommandToEnvironmentColumn), - ) + step := newCommandToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -830,32 +502,15 @@ func HasCommandToEnvironmentWith(preds ...predicate.Environment) predicate.Comma // And groups predicates with the AND operator between them. func And(predicates ...predicate.Command) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Command(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Command) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Command(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
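// Illustrative usage sketch (not generated code): call sites for the predicate helpers
// in this file are unchanged by the move to sql.FieldEQ/FieldIn/etc.; they still compose
// through Where and the And/Or/Not combinators defined here. Assumes this lives in
// package ent next to the generated code, with a generated *Client named client
// (imports: context, github.com/gen0cide/laforge/ent/command).
func exampleFilterCommands(ctx context.Context, client *Client) ([]*Command, error) {
	return client.Command.Query().
		Where(command.And(
			command.ProgramContains("powershell"), // string predicate -> sql.FieldContains
			command.CooldownGTE(30),               // int predicate    -> sql.FieldGTE
			command.DisabledEQ(false),             // bool predicate   -> sql.FieldEQ
		)).
		All(ctx)
}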
func Not(p predicate.Command) predicate.Command { - return predicate.Command(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Command(sql.NotPredicates(p)) } diff --git a/ent/command_create.go b/ent/command_create.go index 57d499c4..6bc26146 100755 --- a/ent/command_create.go +++ b/ent/command_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -22,9 +22,9 @@ type CommandCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (cc *CommandCreate) SetHclID(s string) *CommandCreate { - cc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (cc *CommandCreate) SetHCLID(s string) *CommandCreate { + cc.mutation.SetHCLID(s) return cc } @@ -143,44 +143,8 @@ func (cc *CommandCreate) Mutation() *CommandMutation { // Save creates the Command in the database. func (cc *CommandCreate) Save(ctx context.Context) (*Command, error) { - var ( - err error - node *Command - ) cc.defaults() - if len(cc.hooks) == 0 { - if err = cc.check(); err != nil { - return nil, err - } - node, err = cc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CommandMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = cc.check(); err != nil { - return nil, err - } - cc.mutation = mutation - if node, err = cc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(cc.hooks) - 1; i >= 0; i-- { - if cc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, cc.sqlSave, cc.mutation, cc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -215,7 +179,7 @@ func (cc *CommandCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (cc *CommandCreate) check() error { - if _, ok := cc.mutation.HclID(); !ok { + if _, ok := cc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Command.hcl_id"`)} } if _, ok := cc.mutation.Name(); !ok { @@ -262,10 +226,13 @@ func (cc *CommandCreate) check() error { } func (cc *CommandCreate) sqlSave(ctx context.Context) (*Command, error) { + if err := cc.check(); err != nil { + return nil, err + } _node, _spec := cc.createSpec() if err := sqlgraph.CreateNode(ctx, cc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -276,110 +243,62 @@ func (cc *CommandCreate) sqlSave(ctx context.Context) (*Command, error) { return nil, err } } + cc.mutation.id = &_node.ID + cc.mutation.done = true return _node, nil } func (cc *CommandCreate) createSpec() (*Command, *sqlgraph.CreateSpec) { var ( _node = &Command{config: cc.config} - _spec = &sqlgraph.CreateSpec{ - Table: command.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(command.Table, sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID)) ) if id, ok := cc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := cc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldHclID, - }) - _node.HclID = value + if value, ok := cc.mutation.HCLID(); ok { + _spec.SetField(command.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := cc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldName, - }) + _spec.SetField(command.FieldName, field.TypeString, value) _node.Name = value } if value, ok := cc.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldDescription, - }) + _spec.SetField(command.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := cc.mutation.Program(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldProgram, - }) + _spec.SetField(command.FieldProgram, field.TypeString, value) _node.Program = value } if value, ok := cc.mutation.Args(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldArgs, - }) + _spec.SetField(command.FieldArgs, field.TypeJSON, value) _node.Args = value } if value, ok := cc.mutation.IgnoreErrors(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: command.FieldIgnoreErrors, - }) + _spec.SetField(command.FieldIgnoreErrors, field.TypeBool, value) _node.IgnoreErrors = value } if value, ok := cc.mutation.Disabled(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: command.FieldDisabled, - }) + _spec.SetField(command.FieldDisabled, field.TypeBool, value) _node.Disabled = value } if value, ok := cc.mutation.Cooldown(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldCooldown, - }) + _spec.SetField(command.FieldCooldown, field.TypeInt, value) 
_node.Cooldown = value } if value, ok := cc.mutation.Timeout(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldTimeout, - }) + _spec.SetField(command.FieldTimeout, field.TypeInt, value) _node.Timeout = value } if value, ok := cc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldVars, - }) + _spec.SetField(command.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := cc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldTags, - }) + _spec.SetField(command.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := cc.mutation.CommandToUserIDs(); len(nodes) > 0 { @@ -390,10 +309,7 @@ func (cc *CommandCreate) createSpec() (*Command, *sqlgraph.CreateSpec) { Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -409,10 +325,7 @@ func (cc *CommandCreate) createSpec() (*Command, *sqlgraph.CreateSpec) { Columns: []string{command.CommandToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -427,11 +340,15 @@ func (cc *CommandCreate) createSpec() (*Command, *sqlgraph.CreateSpec) { // CommandCreateBulk is the builder for creating many Command entities in bulk. type CommandCreateBulk struct { config + err error builders []*CommandCreate } // Save creates the Command entities in the database. func (ccb *CommandCreateBulk) Save(ctx context.Context) ([]*Command, error) { + if ccb.err != nil { + return nil, ccb.err + } specs := make([]*sqlgraph.CreateSpec, len(ccb.builders)) nodes := make([]*Command, len(ccb.builders)) mutators := make([]Mutator, len(ccb.builders)) @@ -448,8 +365,8 @@ func (ccb *CommandCreateBulk) Save(ctx context.Context) ([]*Command, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ccb.builders[i+1].mutation) } else { @@ -457,7 +374,7 @@ func (ccb *CommandCreateBulk) Save(ctx context.Context) ([]*Command, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, ccb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/command_delete.go b/ent/command_delete.go index 33064e05..80ff0176 100755 --- a/ent/command_delete.go +++ b/ent/command_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (cd *CommandDelete) Where(ps ...predicate.Command) *CommandDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
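// Illustrative usage sketch (not generated code): deleting by predicate with the delete
// builder below; Exec reports how many rows were removed. Assumes package ent with a
// generated *Client named client (imports: context, github.com/gen0cide/laforge/ent/command).
func examplePruneDisabledCommands(ctx context.Context, client *Client) (int, error) {
	// Delete every disabled Command in one statement.
	return client.Command.Delete().
		Where(command.DisabledEQ(true)).
		Exec(ctx)
}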
func (cd *CommandDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(cd.hooks) == 0 { - affected, err = cd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CommandMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - cd.mutation = mutation - affected, err = cd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(cd.hooks) - 1; i >= 0; i-- { - if cd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, cd.sqlExec, cd.mutation, cd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (cd *CommandDelete) ExecX(ctx context.Context) int { } func (cd *CommandDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: command.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(command.Table, sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID)) if ps := cd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (cd *CommandDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, cd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, cd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + cd.mutation.done = true + return affected, err } // CommandDeleteOne is the builder for deleting a single Command entity. @@ -92,6 +61,12 @@ type CommandDeleteOne struct { cd *CommandDelete } +// Where appends a list predicates to the CommandDelete builder. +func (cdo *CommandDeleteOne) Where(ps ...predicate.Command) *CommandDeleteOne { + cdo.cd.mutation.Where(ps...) + return cdo +} + // Exec executes the deletion query. func (cdo *CommandDeleteOne) Exec(ctx context.Context) error { n, err := cdo.cd.Exec(ctx) @@ -107,5 +82,7 @@ func (cdo *CommandDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (cdo *CommandDeleteOne) ExecX(ctx context.Context) { - cdo.cd.ExecX(ctx) + if err := cdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/command_query.go b/ent/command_query.go index 894c6b77..2d74c32f 100755 --- a/ent/command_query.go +++ b/ent/command_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,16 +21,16 @@ import ( // CommandQuery is the builder for querying Command entities. type CommandQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Command - // eager-loading edges. + ctx *QueryContext + order []command.OrderOption + inters []Interceptor + predicates []predicate.Command withCommandToUser *UserQuery withCommandToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Command) error + withNamedCommandToUser map[string]*UserQuery // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -43,34 +42,34 @@ func (cq *CommandQuery) Where(ps ...predicate.Command) *CommandQuery { return cq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (cq *CommandQuery) Limit(limit int) *CommandQuery { - cq.limit = &limit + cq.ctx.Limit = &limit return cq } -// Offset adds an offset step to the query. +// Offset to start from. func (cq *CommandQuery) Offset(offset int) *CommandQuery { - cq.offset = &offset + cq.ctx.Offset = &offset return cq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (cq *CommandQuery) Unique(unique bool) *CommandQuery { - cq.unique = &unique + cq.ctx.Unique = &unique return cq } -// Order adds an order step to the query. -func (cq *CommandQuery) Order(o ...OrderFunc) *CommandQuery { +// Order specifies how the records should be ordered. +func (cq *CommandQuery) Order(o ...command.OrderOption) *CommandQuery { cq.order = append(cq.order, o...) return cq } // QueryCommandToUser chains the current query on the "CommandToUser" edge. func (cq *CommandQuery) QueryCommandToUser() *UserQuery { - query := &UserQuery{config: cq.config} + query := (&UserClient{config: cq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := cq.prepareQuery(ctx); err != nil { return nil, err @@ -92,7 +91,7 @@ func (cq *CommandQuery) QueryCommandToUser() *UserQuery { // QueryCommandToEnvironment chains the current query on the "CommandToEnvironment" edge. func (cq *CommandQuery) QueryCommandToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: cq.config} + query := (&EnvironmentClient{config: cq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := cq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +114,7 @@ func (cq *CommandQuery) QueryCommandToEnvironment() *EnvironmentQuery { // First returns the first Command entity from the query. // Returns a *NotFoundError when no Command was found. func (cq *CommandQuery) First(ctx context.Context) (*Command, error) { - nodes, err := cq.Limit(1).All(ctx) + nodes, err := cq.Limit(1).All(setContextOp(ctx, cq.ctx, "First")) if err != nil { return nil, err } @@ -138,7 +137,7 @@ func (cq *CommandQuery) FirstX(ctx context.Context) *Command { // Returns a *NotFoundError when no Command ID was found. func (cq *CommandQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cq.Limit(1).IDs(ctx); err != nil { + if ids, err = cq.Limit(1).IDs(setContextOp(ctx, cq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -161,7 +160,7 @@ func (cq *CommandQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Command entity is found. // Returns a *NotFoundError when no Command entities are found. func (cq *CommandQuery) Only(ctx context.Context) (*Command, error) { - nodes, err := cq.Limit(2).All(ctx) + nodes, err := cq.Limit(2).All(setContextOp(ctx, cq.ctx, "Only")) if err != nil { return nil, err } @@ -189,7 +188,7 @@ func (cq *CommandQuery) OnlyX(ctx context.Context) *Command { // Returns a *NotFoundError when no entities are found. 
func (cq *CommandQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cq.Limit(2).IDs(ctx); err != nil { + if ids, err = cq.Limit(2).IDs(setContextOp(ctx, cq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -214,10 +213,12 @@ func (cq *CommandQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Commands. func (cq *CommandQuery) All(ctx context.Context) ([]*Command, error) { + ctx = setContextOp(ctx, cq.ctx, "All") if err := cq.prepareQuery(ctx); err != nil { return nil, err } - return cq.sqlAll(ctx) + qr := querierAll[[]*Command, *CommandQuery]() + return withInterceptors[[]*Command](ctx, cq, qr, cq.inters) } // AllX is like All, but panics if an error occurs. @@ -230,9 +231,12 @@ func (cq *CommandQuery) AllX(ctx context.Context) []*Command { } // IDs executes the query and returns a list of Command IDs. -func (cq *CommandQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := cq.Select(command.FieldID).Scan(ctx, &ids); err != nil { +func (cq *CommandQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if cq.ctx.Unique == nil && cq.path != nil { + cq.Unique(true) + } + ctx = setContextOp(ctx, cq.ctx, "IDs") + if err = cq.Select(command.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -249,10 +253,11 @@ func (cq *CommandQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (cq *CommandQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cq.ctx, "Count") if err := cq.prepareQuery(ctx); err != nil { return 0, err } - return cq.sqlCount(ctx) + return withInterceptors[int](ctx, cq, querierCount[*CommandQuery](), cq.inters) } // CountX is like Count, but panics if an error occurs. @@ -266,10 +271,15 @@ func (cq *CommandQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (cq *CommandQuery) Exist(ctx context.Context) (bool, error) { - if err := cq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, cq.ctx, "Exist") + switch _, err := cq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return cq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -289,23 +299,22 @@ func (cq *CommandQuery) Clone() *CommandQuery { } return &CommandQuery{ config: cq.config, - limit: cq.limit, - offset: cq.offset, - order: append([]OrderFunc{}, cq.order...), + ctx: cq.ctx.Clone(), + order: append([]command.OrderOption{}, cq.order...), + inters: append([]Interceptor{}, cq.inters...), predicates: append([]predicate.Command{}, cq.predicates...), withCommandToUser: cq.withCommandToUser.Clone(), withCommandToEnvironment: cq.withCommandToEnvironment.Clone(), // clone intermediate query. - sql: cq.sql.Clone(), - path: cq.path, - unique: cq.unique, + sql: cq.sql.Clone(), + path: cq.path, } } // WithCommandToUser tells the query-builder to eager-load the nodes that are connected to // the "CommandToUser" edge. The optional arguments are used to configure the query builder of the edge. 
func (cq *CommandQuery) WithCommandToUser(opts ...func(*UserQuery)) *CommandQuery { - query := &UserQuery{config: cq.config} + query := (&UserClient{config: cq.config}).Query() for _, opt := range opts { opt(query) } @@ -316,7 +325,7 @@ func (cq *CommandQuery) WithCommandToUser(opts ...func(*UserQuery)) *CommandQuer // WithCommandToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "CommandToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (cq *CommandQuery) WithCommandToEnvironment(opts ...func(*EnvironmentQuery)) *CommandQuery { - query := &EnvironmentQuery{config: cq.config} + query := (&EnvironmentClient{config: cq.config}).Query() for _, opt := range opts { opt(query) } @@ -330,25 +339,21 @@ func (cq *CommandQuery) WithCommandToEnvironment(opts ...func(*EnvironmentQuery) // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Command.Query(). -// GroupBy(command.FieldHclID). +// GroupBy(command.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (cq *CommandQuery) GroupBy(field string, fields ...string) *CommandGroupBy { - group := &CommandGroupBy{config: cq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := cq.prepareQuery(ctx); err != nil { - return nil, err - } - return cq.sqlQuery(ctx), nil - } - return group + cq.ctx.Fields = append([]string{field}, fields...) + grbuild := &CommandGroupBy{build: cq} + grbuild.flds = &cq.ctx.Fields + grbuild.label = command.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -357,20 +362,37 @@ func (cq *CommandQuery) GroupBy(field string, fields ...string) *CommandGroupBy // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Command.Query(). -// Select(command.FieldHclID). +// Select(command.FieldHCLID). // Scan(ctx, &v) -// func (cq *CommandQuery) Select(fields ...string) *CommandSelect { - cq.fields = append(cq.fields, fields...) - return &CommandSelect{CommandQuery: cq} + cq.ctx.Fields = append(cq.ctx.Fields, fields...) + sbuild := &CommandSelect{CommandQuery: cq} + sbuild.label = command.Label + sbuild.flds, sbuild.scan = &cq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a CommandSelect configured with the given aggregations. +func (cq *CommandQuery) Aggregate(fns ...AggregateFunc) *CommandSelect { + return cq.Select().Aggregate(fns...) 
} func (cq *CommandQuery) prepareQuery(ctx context.Context) error { - for _, f := range cq.fields { + for _, inter := range cq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cq); err != nil { + return err + } + } + } + for _, f := range cq.ctx.Fields { if !command.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -385,7 +407,7 @@ func (cq *CommandQuery) prepareQuery(ctx context.Context) error { return nil } -func (cq *CommandQuery) sqlAll(ctx context.Context) ([]*Command, error) { +func (cq *CommandQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Command, error) { var ( nodes = []*Command{} withFKs = cq.withFKs @@ -401,121 +423,140 @@ func (cq *CommandQuery) sqlAll(ctx context.Context) ([]*Command, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, command.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Command).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Command{config: cq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(cq.modifiers) > 0 { + _spec.Modifiers = cq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, cq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := cq.withCommandToUser; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Command) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.CommandToUser = []*User{} - } - query.withFKs = true - query.Where(predicate.User(func(s *sql.Selector) { - s.Where(sql.InValues(command.CommandToUserColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := cq.loadCommandToUser(ctx, query, nodes, + func(n *Command) { n.Edges.CommandToUser = []*User{} }, + func(n *Command, e *User) { n.Edges.CommandToUser = append(n.Edges.CommandToUser, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.command_command_to_user - if fk == nil { - return nil, fmt.Errorf(`foreign-key "command_command_to_user" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "command_command_to_user" returned %v for node %v`, *fk, n.ID) - } - node.Edges.CommandToUser = append(node.Edges.CommandToUser, n) - } } - if query := cq.withCommandToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Command) - for i := range nodes { - if nodes[i].environment_environment_to_command == nil { - continue - } - fk := *nodes[i].environment_environment_to_command - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := cq.loadCommandToEnvironment(ctx, query, nodes, nil, + func(n *Command, e *Environment) { n.Edges.CommandToEnvironment = e }); err != nil { + return nil, 
err } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range cq.withNamedCommandToUser { + if err := cq.loadCommandToUser(ctx, query, nodes, + func(n *Command) { n.appendNamedCommandToUser(name) }, + func(n *Command, e *User) { n.appendNamedCommandToUser(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_command" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.CommandToEnvironment = n - } + } + for i := range cq.loadTotal { + if err := cq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (cq *CommandQuery) sqlCount(ctx context.Context) (int, error) { - _spec := cq.querySpec() - _spec.Node.Columns = cq.fields - if len(cq.fields) > 0 { - _spec.Unique = cq.unique != nil && *cq.unique +func (cq *CommandQuery) loadCommandToUser(ctx context.Context, query *UserQuery, nodes []*Command, init func(*Command), assign func(*Command, *User)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Command) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } } - return sqlgraph.CountNodes(ctx, cq.driver, _spec) + query.withFKs = true + query.Where(predicate.User(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(command.CommandToUserColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.command_command_to_user + if fk == nil { + return fmt.Errorf(`foreign-key "command_command_to_user" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "command_command_to_user" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -func (cq *CommandQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := cq.sqlCount(ctx) +func (cq *CommandQuery) loadCommandToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Command, init func(*Command), assign func(*Command, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Command) + for i := range nodes { + if nodes[i].environment_environment_to_command == nil { + continue + } + fk := *nodes[i].environment_environment_to_command + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_command" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - return n > 0, nil + return nil +} + +func (cq *CommandQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cq.querySpec() + if len(cq.modifiers) > 0 { + _spec.Modifiers = cq.modifiers + } + _spec.Node.Columns = cq.ctx.Fields + if len(cq.ctx.Fields) > 0 { + _spec.Unique = cq.ctx.Unique != nil && *cq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cq.driver, _spec) } func (cq *CommandQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: 
command.Table, - Columns: command.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, - }, - From: cq.sql, - Unique: true, - } - if unique := cq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(command.Table, command.Columns, sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID)) + _spec.From = cq.sql + if unique := cq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if cq.path != nil { + _spec.Unique = true } - if fields := cq.fields; len(fields) > 0 { + if fields := cq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, command.FieldID) for i := range fields { @@ -531,10 +572,10 @@ func (cq *CommandQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := cq.limit; limit != nil { + if limit := cq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := cq.offset; offset != nil { + if offset := cq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := cq.order; len(ps) > 0 { @@ -550,7 +591,7 @@ func (cq *CommandQuery) querySpec() *sqlgraph.QuerySpec { func (cq *CommandQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(cq.driver.Dialect()) t1 := builder.Table(command.Table) - columns := cq.fields + columns := cq.ctx.Fields if len(columns) == 0 { columns = command.Columns } @@ -559,7 +600,7 @@ func (cq *CommandQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = cq.sql selector.Select(selector.Columns(columns...)...) } - if cq.unique != nil && *cq.unique { + if cq.ctx.Unique != nil && *cq.ctx.Unique { selector.Distinct() } for _, p := range cq.predicates { @@ -568,25 +609,35 @@ func (cq *CommandQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range cq.order { p(selector) } - if offset := cq.offset; offset != nil { + if offset := cq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := cq.limit; limit != nil { + if limit := cq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedCommandToUser tells the query-builder to eager-load the nodes that are connected to the "CommandToUser" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (cq *CommandQuery) WithNamedCommandToUser(name string, opts ...func(*UserQuery)) *CommandQuery { + query := (&UserClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + if cq.withNamedCommandToUser == nil { + cq.withNamedCommandToUser = make(map[string]*UserQuery) + } + cq.withNamedCommandToUser[name] = query + return cq +} + // CommandGroupBy is the group-by builder for Command entities. type CommandGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *CommandQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -595,471 +646,77 @@ func (cgb *CommandGroupBy) Aggregate(fns ...AggregateFunc) *CommandGroupBy { return cgb } -// Scan applies the group-by query and scans the result into the given value. -func (cgb *CommandGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := cgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. 
+func (cgb *CommandGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cgb.build.ctx, "GroupBy") + if err := cgb.build.prepareQuery(ctx); err != nil { return err } - cgb.sql = query - return cgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (cgb *CommandGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := cgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CommandGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (cgb *CommandGroupBy) StringsX(ctx context.Context) []string { - v, err := cgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = cgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (cgb *CommandGroupBy) StringX(ctx context.Context) string { - v, err := cgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CommandGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (cgb *CommandGroupBy) IntsX(ctx context.Context) []int { - v, err := cgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = cgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (cgb *CommandGroupBy) IntX(ctx context.Context) int { - v, err := cgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (cgb *CommandGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CommandGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (cgb *CommandGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := cgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = cgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (cgb *CommandGroupBy) Float64X(ctx context.Context) float64 { - v, err := cgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CommandGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*CommandQuery, *CommandGroupBy](ctx, cgb.build, cgb, cgb.build.inters, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (cgb *CommandGroupBy) BoolsX(ctx context.Context) []bool { - v, err := cgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CommandGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = cgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (cgb *CommandGroupBy) BoolX(ctx context.Context) bool { - v, err := cgb.Bool(ctx) - if err != nil { - panic(err) +func (cgb *CommandGroupBy) sqlScan(ctx context.Context, root *CommandQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cgb.fns)) + for _, fn := range cgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (cgb *CommandGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range cgb.fields { - if !command.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cgb.flds)+len(cgb.fns)) + for _, f := range *cgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := cgb.sqlQuery() + selector.GroupBy(selector.Columns(*cgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := cgb.driver.Query(ctx, query, args, rows); err != nil { + if err := cgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (cgb *CommandGroupBy) sqlQuery() *sql.Selector { - selector := cgb.sql.Select() - aggregation := make([]string, 0, len(cgb.fns)) - for _, fn := range cgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(cgb.fields)+len(cgb.fns)) - for _, f := range cgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(cgb.fields...)...) -} - // CommandSelect is the builder for selecting fields of Command entities. type CommandSelect struct { *CommandQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cs *CommandSelect) Aggregate(fns ...AggregateFunc) *CommandSelect { + cs.fns = append(cs.fns, fns...) + return cs } // Scan applies the selector query and scans the result into the given value. -func (cs *CommandSelect) Scan(ctx context.Context, v interface{}) error { +func (cs *CommandSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cs.ctx, "Select") if err := cs.prepareQuery(ctx); err != nil { return err } - cs.sql = cs.CommandQuery.sqlQuery(ctx) - return cs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (cs *CommandSelect) ScanX(ctx context.Context, v interface{}) { - if err := cs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (cs *CommandSelect) Strings(ctx context.Context) ([]string, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CommandSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (cs *CommandSelect) StringsX(ctx context.Context) []string { - v, err := cs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (cs *CommandSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = cs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (cs *CommandSelect) StringX(ctx context.Context) string { - v, err := cs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
-func (cs *CommandSelect) Ints(ctx context.Context) ([]int, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CommandSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (cs *CommandSelect) IntsX(ctx context.Context) []int { - v, err := cs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (cs *CommandSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = cs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (cs *CommandSelect) IntX(ctx context.Context) int { - v, err := cs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (cs *CommandSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CommandSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (cs *CommandSelect) Float64sX(ctx context.Context) []float64 { - v, err := cs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (cs *CommandSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = cs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (cs *CommandSelect) Float64X(ctx context.Context) float64 { - v, err := cs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (cs *CommandSelect) Bools(ctx context.Context) ([]bool, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CommandSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*CommandQuery, *CommandSelect](ctx, cs.CommandQuery, cs, cs.inters, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (cs *CommandSelect) BoolsX(ctx context.Context) []bool { - v, err := cs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (cs *CommandSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = cs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{command.Label} - default: - err = fmt.Errorf("ent: CommandSelect.Bools returned %d results when one was expected", len(v)) +func (cs *CommandSelect) sqlScan(ctx context.Context, root *CommandQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cs.fns)) + for _, fn := range cs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (cs *CommandSelect) BoolX(ctx context.Context) bool { - v, err := cs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*cs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (cs *CommandSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := cs.sql.Query() + query, args := selector.Query() if err := cs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/command_update.go b/ent/command_update.go index 55d89944..00477f89 100755 --- a/ent/command_update.go +++ b/ent/command_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/command" "github.com/gen0cide/laforge/ent/environment" @@ -30,9 +31,17 @@ func (cu *CommandUpdate) Where(ps ...predicate.Command) *CommandUpdate { return cu } -// SetHclID sets the "hcl_id" field. -func (cu *CommandUpdate) SetHclID(s string) *CommandUpdate { - cu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (cu *CommandUpdate) SetHCLID(s string) *CommandUpdate { + cu.mutation.SetHCLID(s) + return cu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableHCLID(s *string) *CommandUpdate { + if s != nil { + cu.SetHCLID(*s) + } return cu } @@ -42,36 +51,82 @@ func (cu *CommandUpdate) SetName(s string) *CommandUpdate { return cu } +// SetNillableName sets the "name" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableName(s *string) *CommandUpdate { + if s != nil { + cu.SetName(*s) + } + return cu +} + // SetDescription sets the "description" field. func (cu *CommandUpdate) SetDescription(s string) *CommandUpdate { cu.mutation.SetDescription(s) return cu } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableDescription(s *string) *CommandUpdate { + if s != nil { + cu.SetDescription(*s) + } + return cu +} + // SetProgram sets the "program" field. func (cu *CommandUpdate) SetProgram(s string) *CommandUpdate { cu.mutation.SetProgram(s) return cu } +// SetNillableProgram sets the "program" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableProgram(s *string) *CommandUpdate { + if s != nil { + cu.SetProgram(*s) + } + return cu +} + // SetArgs sets the "args" field. func (cu *CommandUpdate) SetArgs(s []string) *CommandUpdate { cu.mutation.SetArgs(s) return cu } +// AppendArgs appends s to the "args" field. 
+func (cu *CommandUpdate) AppendArgs(s []string) *CommandUpdate { + cu.mutation.AppendArgs(s) + return cu +} + // SetIgnoreErrors sets the "ignore_errors" field. func (cu *CommandUpdate) SetIgnoreErrors(b bool) *CommandUpdate { cu.mutation.SetIgnoreErrors(b) return cu } +// SetNillableIgnoreErrors sets the "ignore_errors" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableIgnoreErrors(b *bool) *CommandUpdate { + if b != nil { + cu.SetIgnoreErrors(*b) + } + return cu +} + // SetDisabled sets the "disabled" field. func (cu *CommandUpdate) SetDisabled(b bool) *CommandUpdate { cu.mutation.SetDisabled(b) return cu } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableDisabled(b *bool) *CommandUpdate { + if b != nil { + cu.SetDisabled(*b) + } + return cu +} + // SetCooldown sets the "cooldown" field. func (cu *CommandUpdate) SetCooldown(i int) *CommandUpdate { cu.mutation.ResetCooldown() @@ -79,6 +134,14 @@ func (cu *CommandUpdate) SetCooldown(i int) *CommandUpdate { return cu } +// SetNillableCooldown sets the "cooldown" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableCooldown(i *int) *CommandUpdate { + if i != nil { + cu.SetCooldown(*i) + } + return cu +} + // AddCooldown adds i to the "cooldown" field. func (cu *CommandUpdate) AddCooldown(i int) *CommandUpdate { cu.mutation.AddCooldown(i) @@ -92,6 +155,14 @@ func (cu *CommandUpdate) SetTimeout(i int) *CommandUpdate { return cu } +// SetNillableTimeout sets the "timeout" field if the given value is not nil. +func (cu *CommandUpdate) SetNillableTimeout(i *int) *CommandUpdate { + if i != nil { + cu.SetTimeout(*i) + } + return cu +} + // AddTimeout adds i to the "timeout" field. func (cu *CommandUpdate) AddTimeout(i int) *CommandUpdate { cu.mutation.AddTimeout(i) @@ -178,40 +249,7 @@ func (cu *CommandUpdate) ClearCommandToEnvironment() *CommandUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (cu *CommandUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(cu.hooks) == 0 { - if err = cu.check(); err != nil { - return 0, err - } - affected, err = cu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CommandMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = cu.check(); err != nil { - return 0, err - } - cu.mutation = mutation - affected, err = cu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(cu.hooks) - 1; i >= 0; i-- { - if cu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, cu.sqlSave, cu.mutation, cu.hooks) } // SaveX is like Save, but panics if an error occurs. 
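// Illustrative usage sketch (not generated code): the SetNillable* setters added above let
// a caller apply only the fields it actually received, since nil pointers are skipped.
// Assumes package ent with a generated *Client named client and a hypothetical program
// value (imports: context, github.com/gen0cide/laforge/ent/command).
func examplePatchCommands(ctx context.Context, client *Client, desc *string, cooldown *int) (int, error) {
	return client.Command.Update().
		Where(command.ProgramEQ("provision.sh")). // hypothetical filter value
		SetNillableDescription(desc).             // no-op when desc is nil
		SetNillableCooldown(cooldown).            // no-op when cooldown is nil
		AppendArgs([]string{"--verbose"}).        // applied via sqljson.Append at save time
		Save(ctx)
}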
@@ -252,16 +290,10 @@ func (cu *CommandUpdate) check() error { } func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: command.Table, - Columns: command.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, - }, + if err := cu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(command.Table, command.Columns, sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID)) if ps := cu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -269,96 +301,49 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := cu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldHclID, - }) + if value, ok := cu.mutation.HCLID(); ok { + _spec.SetField(command.FieldHCLID, field.TypeString, value) } if value, ok := cu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldName, - }) + _spec.SetField(command.FieldName, field.TypeString, value) } if value, ok := cu.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldDescription, - }) + _spec.SetField(command.FieldDescription, field.TypeString, value) } if value, ok := cu.mutation.Program(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldProgram, - }) + _spec.SetField(command.FieldProgram, field.TypeString, value) } if value, ok := cu.mutation.Args(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldArgs, + _spec.SetField(command.FieldArgs, field.TypeJSON, value) + } + if value, ok := cu.mutation.AppendedArgs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, command.FieldArgs, value) }) } if value, ok := cu.mutation.IgnoreErrors(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: command.FieldIgnoreErrors, - }) + _spec.SetField(command.FieldIgnoreErrors, field.TypeBool, value) } if value, ok := cu.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: command.FieldDisabled, - }) + _spec.SetField(command.FieldDisabled, field.TypeBool, value) } if value, ok := cu.mutation.Cooldown(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldCooldown, - }) + _spec.SetField(command.FieldCooldown, field.TypeInt, value) } if value, ok := cu.mutation.AddedCooldown(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldCooldown, - }) + _spec.AddField(command.FieldCooldown, field.TypeInt, value) } if value, ok := cu.mutation.Timeout(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldTimeout, - }) + _spec.SetField(command.FieldTimeout, field.TypeInt, value) } if value, ok := cu.mutation.AddedTimeout(); ok { - _spec.Fields.Add = 
append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldTimeout, - }) + _spec.AddField(command.FieldTimeout, field.TypeInt, value) } if value, ok := cu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldVars, - }) + _spec.SetField(command.FieldVars, field.TypeJSON, value) } if value, ok := cu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldTags, - }) + _spec.SetField(command.FieldTags, field.TypeJSON, value) } if cu.mutation.CommandToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -368,10 +353,7 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -384,10 +366,7 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -403,10 +382,7 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -422,10 +398,7 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{command.CommandToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -438,10 +411,7 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{command.CommandToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -453,10 +423,11 @@ func (cu *CommandUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{command.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + cu.mutation.done = true return n, nil } @@ -468,9 +439,17 @@ type CommandUpdateOne struct { mutation *CommandMutation } -// SetHclID sets the "hcl_id" field. -func (cuo *CommandUpdateOne) SetHclID(s string) *CommandUpdateOne { - cuo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (cuo *CommandUpdateOne) SetHCLID(s string) *CommandUpdateOne { + cuo.mutation.SetHCLID(s) + return cuo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. 
+func (cuo *CommandUpdateOne) SetNillableHCLID(s *string) *CommandUpdateOne { + if s != nil { + cuo.SetHCLID(*s) + } return cuo } @@ -480,36 +459,82 @@ func (cuo *CommandUpdateOne) SetName(s string) *CommandUpdateOne { return cuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableName(s *string) *CommandUpdateOne { + if s != nil { + cuo.SetName(*s) + } + return cuo +} + // SetDescription sets the "description" field. func (cuo *CommandUpdateOne) SetDescription(s string) *CommandUpdateOne { cuo.mutation.SetDescription(s) return cuo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableDescription(s *string) *CommandUpdateOne { + if s != nil { + cuo.SetDescription(*s) + } + return cuo +} + // SetProgram sets the "program" field. func (cuo *CommandUpdateOne) SetProgram(s string) *CommandUpdateOne { cuo.mutation.SetProgram(s) return cuo } +// SetNillableProgram sets the "program" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableProgram(s *string) *CommandUpdateOne { + if s != nil { + cuo.SetProgram(*s) + } + return cuo +} + // SetArgs sets the "args" field. func (cuo *CommandUpdateOne) SetArgs(s []string) *CommandUpdateOne { cuo.mutation.SetArgs(s) return cuo } +// AppendArgs appends s to the "args" field. +func (cuo *CommandUpdateOne) AppendArgs(s []string) *CommandUpdateOne { + cuo.mutation.AppendArgs(s) + return cuo +} + // SetIgnoreErrors sets the "ignore_errors" field. func (cuo *CommandUpdateOne) SetIgnoreErrors(b bool) *CommandUpdateOne { cuo.mutation.SetIgnoreErrors(b) return cuo } +// SetNillableIgnoreErrors sets the "ignore_errors" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableIgnoreErrors(b *bool) *CommandUpdateOne { + if b != nil { + cuo.SetIgnoreErrors(*b) + } + return cuo +} + // SetDisabled sets the "disabled" field. func (cuo *CommandUpdateOne) SetDisabled(b bool) *CommandUpdateOne { cuo.mutation.SetDisabled(b) return cuo } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableDisabled(b *bool) *CommandUpdateOne { + if b != nil { + cuo.SetDisabled(*b) + } + return cuo +} + // SetCooldown sets the "cooldown" field. func (cuo *CommandUpdateOne) SetCooldown(i int) *CommandUpdateOne { cuo.mutation.ResetCooldown() @@ -517,6 +542,14 @@ func (cuo *CommandUpdateOne) SetCooldown(i int) *CommandUpdateOne { return cuo } +// SetNillableCooldown sets the "cooldown" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableCooldown(i *int) *CommandUpdateOne { + if i != nil { + cuo.SetCooldown(*i) + } + return cuo +} + // AddCooldown adds i to the "cooldown" field. func (cuo *CommandUpdateOne) AddCooldown(i int) *CommandUpdateOne { cuo.mutation.AddCooldown(i) @@ -530,6 +563,14 @@ func (cuo *CommandUpdateOne) SetTimeout(i int) *CommandUpdateOne { return cuo } +// SetNillableTimeout sets the "timeout" field if the given value is not nil. +func (cuo *CommandUpdateOne) SetNillableTimeout(i *int) *CommandUpdateOne { + if i != nil { + cuo.SetTimeout(*i) + } + return cuo +} + // AddTimeout adds i to the "timeout" field. func (cuo *CommandUpdateOne) AddTimeout(i int) *CommandUpdateOne { cuo.mutation.AddTimeout(i) @@ -614,6 +655,12 @@ func (cuo *CommandUpdateOne) ClearCommandToEnvironment() *CommandUpdateOne { return cuo } +// Where appends a list predicates to the CommandUpdate builder. 
+func (cuo *CommandUpdateOne) Where(ps ...predicate.Command) *CommandUpdateOne { + cuo.mutation.Where(ps...) + return cuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (cuo *CommandUpdateOne) Select(field string, fields ...string) *CommandUpdateOne { @@ -623,40 +670,7 @@ func (cuo *CommandUpdateOne) Select(field string, fields ...string) *CommandUpda // Save executes the query and returns the updated Command entity. func (cuo *CommandUpdateOne) Save(ctx context.Context) (*Command, error) { - var ( - err error - node *Command - ) - if len(cuo.hooks) == 0 { - if err = cuo.check(); err != nil { - return nil, err - } - node, err = cuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CommandMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = cuo.check(); err != nil { - return nil, err - } - cuo.mutation = mutation - node, err = cuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(cuo.hooks) - 1; i >= 0; i-- { - if cuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, cuo.sqlSave, cuo.mutation, cuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -697,16 +711,10 @@ func (cuo *CommandUpdateOne) check() error { } func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: command.Table, - Columns: command.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, - }, + if err := cuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(command.Table, command.Columns, sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID)) id, ok := cuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Command.id" for update`)} @@ -731,96 +739,49 @@ func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err e } } } - if value, ok := cuo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldHclID, - }) + if value, ok := cuo.mutation.HCLID(); ok { + _spec.SetField(command.FieldHCLID, field.TypeString, value) } if value, ok := cuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldName, - }) + _spec.SetField(command.FieldName, field.TypeString, value) } if value, ok := cuo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldDescription, - }) + _spec.SetField(command.FieldDescription, field.TypeString, value) } if value, ok := cuo.mutation.Program(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: command.FieldProgram, - }) + _spec.SetField(command.FieldProgram, field.TypeString, value) } if value, ok := cuo.mutation.Args(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, 
- Column: command.FieldArgs, + _spec.SetField(command.FieldArgs, field.TypeJSON, value) + } + if value, ok := cuo.mutation.AppendedArgs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, command.FieldArgs, value) }) } if value, ok := cuo.mutation.IgnoreErrors(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: command.FieldIgnoreErrors, - }) + _spec.SetField(command.FieldIgnoreErrors, field.TypeBool, value) } if value, ok := cuo.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: command.FieldDisabled, - }) + _spec.SetField(command.FieldDisabled, field.TypeBool, value) } if value, ok := cuo.mutation.Cooldown(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldCooldown, - }) + _spec.SetField(command.FieldCooldown, field.TypeInt, value) } if value, ok := cuo.mutation.AddedCooldown(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldCooldown, - }) + _spec.AddField(command.FieldCooldown, field.TypeInt, value) } if value, ok := cuo.mutation.Timeout(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldTimeout, - }) + _spec.SetField(command.FieldTimeout, field.TypeInt, value) } if value, ok := cuo.mutation.AddedTimeout(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: command.FieldTimeout, - }) + _spec.AddField(command.FieldTimeout, field.TypeInt, value) } if value, ok := cuo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldVars, - }) + _spec.SetField(command.FieldVars, field.TypeJSON, value) } if value, ok := cuo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: command.FieldTags, - }) + _spec.SetField(command.FieldTags, field.TypeJSON, value) } if cuo.mutation.CommandToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -830,10 +791,7 @@ func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err e Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -846,10 +804,7 @@ func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err e Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -865,10 +820,7 @@ func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err e Columns: []string{command.CommandToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -884,10 +836,7 @@ func (cuo *CommandUpdateOne) sqlSave(ctx 
context.Context) (_node *Command, err e Columns: []string{command.CommandToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -900,10 +849,7 @@ func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err e Columns: []string{command.CommandToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -918,9 +864,10 @@ func (cuo *CommandUpdateOne) sqlSave(ctx context.Context) (_node *Command, err e if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{command.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + cuo.mutation.done = true return _node, nil } diff --git a/ent/competition.go b/ent/competition.go index 53a7bc67..14dfd7b6 100755 --- a/ent/competition.go +++ b/ent/competition.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/competition" "github.com/gen0cide/laforge/ent/environment" @@ -18,8 +19,8 @@ type Competition struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // RootPassword holds the value of the "root_password" field. RootPassword string `json:"root_password,omitempty" hcl:"root_password,attr"` // Config holds the value of the "config" field. @@ -30,6 +31,7 @@ type Competition struct { // The values are being populated by the CompetitionQuery when eager-loading is set. Edges CompetitionEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // CompetitionToDNS holds the value of the CompetitionToDNS edge. HCLCompetitionToDNS []*DNS `json:"CompetitionToDNS,omitempty" hcl:"dns,block"` @@ -37,8 +39,9 @@ type Competition struct { HCLCompetitionToEnvironment *Environment `json:"CompetitionToEnvironment,omitempty"` // CompetitionToBuild holds the value of the CompetitionToBuild edge. HCLCompetitionToBuild []*Build `json:"CompetitionToBuild,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_competition *uuid.UUID + selectValues sql.SelectValues } // CompetitionEdges holds the relations/edges for other nodes in the graph. @@ -52,6 +55,11 @@ type CompetitionEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [3]bool + // totalCount holds the count of the edges above. 
+ totalCount [3]map[string]int + + namedCompetitionToDNS map[string][]*DNS + namedCompetitionToBuild map[string][]*Build } // CompetitionToDNSOrErr returns the CompetitionToDNS value or an error if the edge @@ -68,8 +76,7 @@ func (e CompetitionEdges) CompetitionToDNSOrErr() ([]*DNS, error) { func (e CompetitionEdges) CompetitionToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[1] { if e.CompetitionToEnvironment == nil { - // The edge CompetitionToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.CompetitionToEnvironment, nil @@ -87,20 +94,20 @@ func (e CompetitionEdges) CompetitionToBuildOrErr() ([]*Build, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Competition) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Competition) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case competition.FieldConfig, competition.FieldTags: values[i] = new([]byte) - case competition.FieldHclID, competition.FieldRootPassword: + case competition.FieldHCLID, competition.FieldRootPassword: values[i] = new(sql.NullString) case competition.FieldID: values[i] = new(uuid.UUID) case competition.ForeignKeys[0]: // environment_environment_to_competition values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Competition", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -108,7 +115,7 @@ func (*Competition) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Competition fields. -func (c *Competition) assignValues(columns []string, values []interface{}) error { +func (c *Competition) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -120,11 +127,11 @@ func (c *Competition) assignValues(columns []string, values []interface{}) error } else if value != nil { c.ID = *value } - case competition.FieldHclID: + case competition.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - c.HclID = value.String + c.HCLID = value.String } case competition.FieldRootPassword: if value, ok := values[i].(*sql.NullString); !ok { @@ -155,41 +162,49 @@ func (c *Competition) assignValues(columns []string, values []interface{}) error c.environment_environment_to_competition = new(uuid.UUID) *c.environment_environment_to_competition = *value.S.(*uuid.UUID) } + default: + c.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Competition. +// This includes values selected through modifiers, order, etc. +func (c *Competition) Value(name string) (ent.Value, error) { + return c.selectValues.Get(name) +} + // QueryCompetitionToDNS queries the "CompetitionToDNS" edge of the Competition entity. 
func (c *Competition) QueryCompetitionToDNS() *DNSQuery { - return (&CompetitionClient{config: c.config}).QueryCompetitionToDNS(c) + return NewCompetitionClient(c.config).QueryCompetitionToDNS(c) } // QueryCompetitionToEnvironment queries the "CompetitionToEnvironment" edge of the Competition entity. func (c *Competition) QueryCompetitionToEnvironment() *EnvironmentQuery { - return (&CompetitionClient{config: c.config}).QueryCompetitionToEnvironment(c) + return NewCompetitionClient(c.config).QueryCompetitionToEnvironment(c) } // QueryCompetitionToBuild queries the "CompetitionToBuild" edge of the Competition entity. func (c *Competition) QueryCompetitionToBuild() *BuildQuery { - return (&CompetitionClient{config: c.config}).QueryCompetitionToBuild(c) + return NewCompetitionClient(c.config).QueryCompetitionToBuild(c) } // Update returns a builder for updating this Competition. // Note that you need to call Competition.Unwrap() before calling this method if this Competition // was returned from a transaction, and the transaction was committed or rolled back. func (c *Competition) Update() *CompetitionUpdateOne { - return (&CompetitionClient{config: c.config}).UpdateOne(c) + return NewCompetitionClient(c.config).UpdateOne(c) } // Unwrap unwraps the Competition entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (c *Competition) Unwrap() *Competition { - tx, ok := c.config.driver.(*txDriver) + _tx, ok := c.config.driver.(*txDriver) if !ok { panic("ent: Competition is not a transactional entity") } - c.config.driver = tx.drv + c.config.driver = _tx.drv return c } @@ -197,24 +212,69 @@ func (c *Competition) Unwrap() *Competition { func (c *Competition) String() string { var builder strings.Builder builder.WriteString("Competition(") - builder.WriteString(fmt.Sprintf("id=%v", c.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(c.HclID) - builder.WriteString(", root_password=") + builder.WriteString(fmt.Sprintf("id=%v, ", c.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(c.HCLID) + builder.WriteString(", ") + builder.WriteString("root_password=") builder.WriteString(c.RootPassword) - builder.WriteString(", config=") + builder.WriteString(", ") + builder.WriteString("config=") builder.WriteString(fmt.Sprintf("%v", c.Config)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", c.Tags)) builder.WriteByte(')') return builder.String() } -// Competitions is a parsable slice of Competition. -type Competitions []*Competition +// NamedCompetitionToDNS returns the CompetitionToDNS named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (c *Competition) NamedCompetitionToDNS(name string) ([]*DNS, error) { + if c.Edges.namedCompetitionToDNS == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := c.Edges.namedCompetitionToDNS[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (c Competitions) config(cfg config) { - for _i := range c { - c[_i].config = cfg +func (c *Competition) appendNamedCompetitionToDNS(name string, edges ...*DNS) { + if c.Edges.namedCompetitionToDNS == nil { + c.Edges.namedCompetitionToDNS = make(map[string][]*DNS) + } + if len(edges) == 0 { + c.Edges.namedCompetitionToDNS[name] = []*DNS{} + } else { + c.Edges.namedCompetitionToDNS[name] = append(c.Edges.namedCompetitionToDNS[name], edges...) } } + +// NamedCompetitionToBuild returns the CompetitionToBuild named value or an error if the edge was not +// loaded in eager-loading with this name. +func (c *Competition) NamedCompetitionToBuild(name string) ([]*Build, error) { + if c.Edges.namedCompetitionToBuild == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := c.Edges.namedCompetitionToBuild[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (c *Competition) appendNamedCompetitionToBuild(name string, edges ...*Build) { + if c.Edges.namedCompetitionToBuild == nil { + c.Edges.namedCompetitionToBuild = make(map[string][]*Build) + } + if len(edges) == 0 { + c.Edges.namedCompetitionToBuild[name] = []*Build{} + } else { + c.Edges.namedCompetitionToBuild[name] = append(c.Edges.namedCompetitionToBuild[name], edges...) + } +} + +// Competitions is a parsable slice of Competition. +type Competitions []*Competition diff --git a/ent/competition/competition.go b/ent/competition/competition.go index 9675ea33..0af28a2b 100755 --- a/ent/competition/competition.go +++ b/ent/competition/competition.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package competition import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "competition" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldRootPassword holds the string denoting the root_password field in the database. FieldRootPassword = "root_password" // FieldConfig holds the string denoting the config field in the database. @@ -51,7 +53,7 @@ const ( // Columns holds all SQL columns for competition fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldRootPassword, FieldConfig, FieldTags, @@ -88,3 +90,77 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Competition queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByRootPassword orders the results by the root_password field. 
+func ByRootPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRootPassword, opts...).ToFunc() +} + +// ByCompetitionToDNSCount orders the results by CompetitionToDNS count. +func ByCompetitionToDNSCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newCompetitionToDNSStep(), opts...) + } +} + +// ByCompetitionToDNS orders the results by CompetitionToDNS terms. +func ByCompetitionToDNS(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCompetitionToDNSStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByCompetitionToEnvironmentField orders the results by CompetitionToEnvironment field. +func ByCompetitionToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCompetitionToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByCompetitionToBuildCount orders the results by CompetitionToBuild count. +func ByCompetitionToBuildCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newCompetitionToBuildStep(), opts...) + } +} + +// ByCompetitionToBuild orders the results by CompetitionToBuild terms. +func ByCompetitionToBuild(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCompetitionToBuildStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newCompetitionToDNSStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CompetitionToDNSInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, CompetitionToDNSTable, CompetitionToDNSPrimaryKey...), + ) +} +func newCompetitionToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CompetitionToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CompetitionToEnvironmentTable, CompetitionToEnvironmentColumn), + ) +} +func newCompetitionToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CompetitionToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, CompetitionToBuildTable, CompetitionToBuildColumn), + ) +} diff --git a/ent/competition/where.go b/ent/competition/where.go index fa32cebb..4374b171 100755 --- a/ent/competition/where.go +++ b/ent/competition/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package competition @@ -11,321 +11,187 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. 
func IDIn(ids ...uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Competition(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Competition(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Competition(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Competition { + return predicate.Competition(sql.FieldEQ(FieldHCLID, v)) } // RootPassword applies equality check predicate on the "root_password" field. It's identical to RootPasswordEQ. func RootPassword(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldEQ(FieldRootPassword, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Competition { + return predicate.Competition(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. 
+func HCLIDNEQ(v string) predicate.Competition { + return predicate.Competition(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Competition { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Competition(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Competition { + return predicate.Competition(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Competition { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Competition(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Competition { + return predicate.Competition(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Competition { + return predicate.Competition(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Competition { + return predicate.Competition(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Competition { + return predicate.Competition(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Competition { + return predicate.Competition(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Competition { + return predicate.Competition(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. 
-func HclIDHasPrefix(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Competition { + return predicate.Competition(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Competition { + return predicate.Competition(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Competition { + return predicate.Competition(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Competition { + return predicate.Competition(sql.FieldContainsFold(FieldHCLID, v)) } // RootPasswordEQ applies the EQ predicate on the "root_password" field. func RootPasswordEQ(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldEQ(FieldRootPassword, v)) } // RootPasswordNEQ applies the NEQ predicate on the "root_password" field. func RootPasswordNEQ(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldNEQ(FieldRootPassword, v)) } // RootPasswordIn applies the In predicate on the "root_password" field. func RootPasswordIn(vs ...string) predicate.Competition { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Competition(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRootPassword), v...)) - }) + return predicate.Competition(sql.FieldIn(FieldRootPassword, vs...)) } // RootPasswordNotIn applies the NotIn predicate on the "root_password" field. func RootPasswordNotIn(vs ...string) predicate.Competition { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Competition(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRootPassword), v...)) - }) + return predicate.Competition(sql.FieldNotIn(FieldRootPassword, vs...)) } // RootPasswordGT applies the GT predicate on the "root_password" field. 
func RootPasswordGT(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldGT(FieldRootPassword, v)) } // RootPasswordGTE applies the GTE predicate on the "root_password" field. func RootPasswordGTE(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldGTE(FieldRootPassword, v)) } // RootPasswordLT applies the LT predicate on the "root_password" field. func RootPasswordLT(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldLT(FieldRootPassword, v)) } // RootPasswordLTE applies the LTE predicate on the "root_password" field. func RootPasswordLTE(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldLTE(FieldRootPassword, v)) } // RootPasswordContains applies the Contains predicate on the "root_password" field. func RootPasswordContains(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldContains(FieldRootPassword, v)) } // RootPasswordHasPrefix applies the HasPrefix predicate on the "root_password" field. func RootPasswordHasPrefix(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldHasPrefix(FieldRootPassword, v)) } // RootPasswordHasSuffix applies the HasSuffix predicate on the "root_password" field. func RootPasswordHasSuffix(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldHasSuffix(FieldRootPassword, v)) } // RootPasswordEqualFold applies the EqualFold predicate on the "root_password" field. func RootPasswordEqualFold(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldEqualFold(FieldRootPassword, v)) } // RootPasswordContainsFold applies the ContainsFold predicate on the "root_password" field. func RootPasswordContainsFold(v string) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldRootPassword), v)) - }) + return predicate.Competition(sql.FieldContainsFold(FieldRootPassword, v)) } // HasCompetitionToDNS applies the HasEdge predicate on the "CompetitionToDNS" edge. @@ -333,7 +199,6 @@ func HasCompetitionToDNS() predicate.Competition { return predicate.Competition(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(CompetitionToDNSTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, CompetitionToDNSTable, CompetitionToDNSPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -343,11 +208,7 @@ func HasCompetitionToDNS() predicate.Competition { // HasCompetitionToDNSWith applies the HasEdge predicate on the "CompetitionToDNS" edge with a given conditions (other predicates). 
func HasCompetitionToDNSWith(preds ...predicate.DNS) predicate.Competition { return predicate.Competition(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(CompetitionToDNSInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, CompetitionToDNSTable, CompetitionToDNSPrimaryKey...), - ) + step := newCompetitionToDNSStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -361,7 +222,6 @@ func HasCompetitionToEnvironment() predicate.Competition { return predicate.Competition(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(CompetitionToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, CompetitionToEnvironmentTable, CompetitionToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -371,11 +231,7 @@ func HasCompetitionToEnvironment() predicate.Competition { // HasCompetitionToEnvironmentWith applies the HasEdge predicate on the "CompetitionToEnvironment" edge with a given conditions (other predicates). func HasCompetitionToEnvironmentWith(preds ...predicate.Environment) predicate.Competition { return predicate.Competition(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(CompetitionToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, CompetitionToEnvironmentTable, CompetitionToEnvironmentColumn), - ) + step := newCompetitionToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -389,7 +245,6 @@ func HasCompetitionToBuild() predicate.Competition { return predicate.Competition(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(CompetitionToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, CompetitionToBuildTable, CompetitionToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -399,11 +254,7 @@ func HasCompetitionToBuild() predicate.Competition { // HasCompetitionToBuildWith applies the HasEdge predicate on the "CompetitionToBuild" edge with a given conditions (other predicates). func HasCompetitionToBuildWith(preds ...predicate.Build) predicate.Competition { return predicate.Competition(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(CompetitionToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, CompetitionToBuildTable, CompetitionToBuildColumn), - ) + step := newCompetitionToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -414,32 +265,15 @@ func HasCompetitionToBuildWith(preds ...predicate.Build) predicate.Competition { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Competition) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Competition(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Competition) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Competition(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Competition) predicate.Competition { - return predicate.Competition(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Competition(sql.NotPredicates(p)) } diff --git a/ent/competition_create.go b/ent/competition_create.go index fb828dea..1fc13805 100755 --- a/ent/competition_create.go +++ b/ent/competition_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -23,9 +23,9 @@ type CompetitionCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (cc *CompetitionCreate) SetHclID(s string) *CompetitionCreate { - cc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (cc *CompetitionCreate) SetHCLID(s string) *CompetitionCreate { + cc.mutation.SetHCLID(s) return cc } @@ -117,44 +117,8 @@ func (cc *CompetitionCreate) Mutation() *CompetitionMutation { // Save creates the Competition in the database. func (cc *CompetitionCreate) Save(ctx context.Context) (*Competition, error) { - var ( - err error - node *Competition - ) cc.defaults() - if len(cc.hooks) == 0 { - if err = cc.check(); err != nil { - return nil, err - } - node, err = cc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CompetitionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = cc.check(); err != nil { - return nil, err - } - cc.mutation = mutation - if node, err = cc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(cc.hooks) - 1; i >= 0; i-- { - if cc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, cc.sqlSave, cc.mutation, cc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -189,7 +153,7 @@ func (cc *CompetitionCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (cc *CompetitionCreate) check() error { - if _, ok := cc.mutation.HclID(); !ok { + if _, ok := cc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Competition.hcl_id"`)} } if _, ok := cc.mutation.RootPassword(); !ok { @@ -205,10 +169,13 @@ func (cc *CompetitionCreate) check() error { } func (cc *CompetitionCreate) sqlSave(ctx context.Context) (*Competition, error) { + if err := cc.check(); err != nil { + return nil, err + } _node, _spec := cc.createSpec() if err := sqlgraph.CreateNode(ctx, cc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -219,54 +186,34 @@ func (cc *CompetitionCreate) sqlSave(ctx context.Context) (*Competition, error) return nil, err } } + cc.mutation.id = &_node.ID + cc.mutation.done = true return _node, nil } func (cc *CompetitionCreate) createSpec() (*Competition, *sqlgraph.CreateSpec) { var ( _node = &Competition{config: cc.config} - _spec = &sqlgraph.CreateSpec{ - Table: competition.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(competition.Table, sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID)) ) if id, ok := cc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := cc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: competition.FieldHclID, - }) - _node.HclID = value + if value, ok := cc.mutation.HCLID(); ok { + _spec.SetField(competition.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := cc.mutation.RootPassword(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: competition.FieldRootPassword, - }) + _spec.SetField(competition.FieldRootPassword, field.TypeString, value) _node.RootPassword = value } if value, ok := cc.mutation.Config(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: competition.FieldConfig, - }) + _spec.SetField(competition.FieldConfig, field.TypeJSON, value) _node.Config = value } if value, ok := cc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: competition.FieldTags, - }) + _spec.SetField(competition.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := cc.mutation.CompetitionToDNSIDs(); len(nodes) > 0 { @@ -277,10 +224,7 @@ func (cc *CompetitionCreate) createSpec() (*Competition, *sqlgraph.CreateSpec) { Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -296,10 +240,7 @@ func (cc *CompetitionCreate) createSpec() (*Competition, *sqlgraph.CreateSpec) { Columns: []string{competition.CompetitionToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -316,10 +257,7 @@ func (cc *CompetitionCreate) createSpec() (*Competition, *sqlgraph.CreateSpec) { Columns: 
[]string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -333,11 +271,15 @@ func (cc *CompetitionCreate) createSpec() (*Competition, *sqlgraph.CreateSpec) { // CompetitionCreateBulk is the builder for creating many Competition entities in bulk. type CompetitionCreateBulk struct { config + err error builders []*CompetitionCreate } // Save creates the Competition entities in the database. func (ccb *CompetitionCreateBulk) Save(ctx context.Context) ([]*Competition, error) { + if ccb.err != nil { + return nil, ccb.err + } specs := make([]*sqlgraph.CreateSpec, len(ccb.builders)) nodes := make([]*Competition, len(ccb.builders)) mutators := make([]Mutator, len(ccb.builders)) @@ -354,8 +296,8 @@ func (ccb *CompetitionCreateBulk) Save(ctx context.Context) ([]*Competition, err return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ccb.builders[i+1].mutation) } else { @@ -363,7 +305,7 @@ func (ccb *CompetitionCreateBulk) Save(ctx context.Context) ([]*Competition, err // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, ccb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/competition_delete.go b/ent/competition_delete.go index 9550b9cc..7d9220b6 100755 --- a/ent/competition_delete.go +++ b/ent/competition_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (cd *CompetitionDelete) Where(ps ...predicate.Competition) *CompetitionDele // Exec executes the deletion query and returns how many vertices were deleted. func (cd *CompetitionDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(cd.hooks) == 0 { - affected, err = cd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CompetitionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - cd.mutation = mutation - affected, err = cd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(cd.hooks) - 1; i >= 0; i-- { - if cd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, cd.sqlExec, cd.mutation, cd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
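// Usage sketch, not part of the generated code in this diff: how caller code
// interacts with the renamed create-builder API above. It assumes the standard
// generated entrypoint client.Competition.Create() and that the other field
// setters (SetRootPassword, SetConfig, SetTags) exist alongside SetHCLID, which
// is the only one shown verbatim here. Callers that previously chained
// SetHclID(...) must now use SetHCLID(...); Save still applies the hook chain,
// now routed through withHooks, and constraint violations come back wrapped in
// *ConstraintError.
//
//	comp, err := client.Competition.Create().
//		SetHCLID("example-competition"). // formerly SetHclID
//		SetRootPassword("changeme").     // required field, enforced by check()
//		Save(ctx)
//	if err != nil {
//		return err
//	}
//	_ = comp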
@@ -68,15 +40,7 @@ func (cd *CompetitionDelete) ExecX(ctx context.Context) int { } func (cd *CompetitionDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: competition.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(competition.Table, sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID)) if ps := cd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (cd *CompetitionDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, cd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, cd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + cd.mutation.done = true + return affected, err } // CompetitionDeleteOne is the builder for deleting a single Competition entity. @@ -92,6 +61,12 @@ type CompetitionDeleteOne struct { cd *CompetitionDelete } +// Where appends a list predicates to the CompetitionDelete builder. +func (cdo *CompetitionDeleteOne) Where(ps ...predicate.Competition) *CompetitionDeleteOne { + cdo.cd.mutation.Where(ps...) + return cdo +} + // Exec executes the deletion query. func (cdo *CompetitionDeleteOne) Exec(ctx context.Context) error { n, err := cdo.cd.Exec(ctx) @@ -107,5 +82,7 @@ func (cdo *CompetitionDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (cdo *CompetitionDeleteOne) ExecX(ctx context.Context) { - cdo.cd.ExecX(ctx) + if err := cdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/competition_query.go b/ent/competition_query.go index 2b34d60e..0603e86e 100755 --- a/ent/competition_query.go +++ b/ent/competition_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -23,17 +22,18 @@ import ( // CompetitionQuery is the builder for querying Competition entities. type CompetitionQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Competition - // eager-loading edges. + ctx *QueryContext + order []competition.OrderOption + inters []Interceptor + predicates []predicate.Competition withCompetitionToDNS *DNSQuery withCompetitionToEnvironment *EnvironmentQuery withCompetitionToBuild *BuildQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Competition) error + withNamedCompetitionToDNS map[string]*DNSQuery + withNamedCompetitionToBuild map[string]*BuildQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -45,34 +45,34 @@ func (cq *CompetitionQuery) Where(ps ...predicate.Competition) *CompetitionQuery return cq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (cq *CompetitionQuery) Limit(limit int) *CompetitionQuery { - cq.limit = &limit + cq.ctx.Limit = &limit return cq } -// Offset adds an offset step to the query. +// Offset to start from. func (cq *CompetitionQuery) Offset(offset int) *CompetitionQuery { - cq.offset = &offset + cq.ctx.Offset = &offset return cq } // Unique configures the query builder to filter duplicate records on query. 
// By default, unique is set to true, and can be disabled using this method. func (cq *CompetitionQuery) Unique(unique bool) *CompetitionQuery { - cq.unique = &unique + cq.ctx.Unique = &unique return cq } -// Order adds an order step to the query. -func (cq *CompetitionQuery) Order(o ...OrderFunc) *CompetitionQuery { +// Order specifies how the records should be ordered. +func (cq *CompetitionQuery) Order(o ...competition.OrderOption) *CompetitionQuery { cq.order = append(cq.order, o...) return cq } // QueryCompetitionToDNS chains the current query on the "CompetitionToDNS" edge. func (cq *CompetitionQuery) QueryCompetitionToDNS() *DNSQuery { - query := &DNSQuery{config: cq.config} + query := (&DNSClient{config: cq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := cq.prepareQuery(ctx); err != nil { return nil, err @@ -94,7 +94,7 @@ func (cq *CompetitionQuery) QueryCompetitionToDNS() *DNSQuery { // QueryCompetitionToEnvironment chains the current query on the "CompetitionToEnvironment" edge. func (cq *CompetitionQuery) QueryCompetitionToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: cq.config} + query := (&EnvironmentClient{config: cq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := cq.prepareQuery(ctx); err != nil { return nil, err @@ -116,7 +116,7 @@ func (cq *CompetitionQuery) QueryCompetitionToEnvironment() *EnvironmentQuery { // QueryCompetitionToBuild chains the current query on the "CompetitionToBuild" edge. func (cq *CompetitionQuery) QueryCompetitionToBuild() *BuildQuery { - query := &BuildQuery{config: cq.config} + query := (&BuildClient{config: cq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := cq.prepareQuery(ctx); err != nil { return nil, err @@ -139,7 +139,7 @@ func (cq *CompetitionQuery) QueryCompetitionToBuild() *BuildQuery { // First returns the first Competition entity from the query. // Returns a *NotFoundError when no Competition was found. func (cq *CompetitionQuery) First(ctx context.Context) (*Competition, error) { - nodes, err := cq.Limit(1).All(ctx) + nodes, err := cq.Limit(1).All(setContextOp(ctx, cq.ctx, "First")) if err != nil { return nil, err } @@ -162,7 +162,7 @@ func (cq *CompetitionQuery) FirstX(ctx context.Context) *Competition { // Returns a *NotFoundError when no Competition ID was found. func (cq *CompetitionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cq.Limit(1).IDs(ctx); err != nil { + if ids, err = cq.Limit(1).IDs(setContextOp(ctx, cq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -185,7 +185,7 @@ func (cq *CompetitionQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Competition entity is found. // Returns a *NotFoundError when no Competition entities are found. func (cq *CompetitionQuery) Only(ctx context.Context) (*Competition, error) { - nodes, err := cq.Limit(2).All(ctx) + nodes, err := cq.Limit(2).All(setContextOp(ctx, cq.ctx, "Only")) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (cq *CompetitionQuery) OnlyX(ctx context.Context) *Competition { // Returns a *NotFoundError when no entities are found. 
func (cq *CompetitionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = cq.Limit(2).IDs(ctx); err != nil { + if ids, err = cq.Limit(2).IDs(setContextOp(ctx, cq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -238,10 +238,12 @@ func (cq *CompetitionQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Competitions. func (cq *CompetitionQuery) All(ctx context.Context) ([]*Competition, error) { + ctx = setContextOp(ctx, cq.ctx, "All") if err := cq.prepareQuery(ctx); err != nil { return nil, err } - return cq.sqlAll(ctx) + qr := querierAll[[]*Competition, *CompetitionQuery]() + return withInterceptors[[]*Competition](ctx, cq, qr, cq.inters) } // AllX is like All, but panics if an error occurs. @@ -254,9 +256,12 @@ func (cq *CompetitionQuery) AllX(ctx context.Context) []*Competition { } // IDs executes the query and returns a list of Competition IDs. -func (cq *CompetitionQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := cq.Select(competition.FieldID).Scan(ctx, &ids); err != nil { +func (cq *CompetitionQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if cq.ctx.Unique == nil && cq.path != nil { + cq.Unique(true) + } + ctx = setContextOp(ctx, cq.ctx, "IDs") + if err = cq.Select(competition.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -273,10 +278,11 @@ func (cq *CompetitionQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (cq *CompetitionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cq.ctx, "Count") if err := cq.prepareQuery(ctx); err != nil { return 0, err } - return cq.sqlCount(ctx) + return withInterceptors[int](ctx, cq, querierCount[*CompetitionQuery](), cq.inters) } // CountX is like Count, but panics if an error occurs. @@ -290,10 +296,15 @@ func (cq *CompetitionQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (cq *CompetitionQuery) Exist(ctx context.Context) (bool, error) { - if err := cq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, cq.ctx, "Exist") + switch _, err := cq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return cq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -313,24 +324,23 @@ func (cq *CompetitionQuery) Clone() *CompetitionQuery { } return &CompetitionQuery{ config: cq.config, - limit: cq.limit, - offset: cq.offset, - order: append([]OrderFunc{}, cq.order...), + ctx: cq.ctx.Clone(), + order: append([]competition.OrderOption{}, cq.order...), + inters: append([]Interceptor{}, cq.inters...), predicates: append([]predicate.Competition{}, cq.predicates...), withCompetitionToDNS: cq.withCompetitionToDNS.Clone(), withCompetitionToEnvironment: cq.withCompetitionToEnvironment.Clone(), withCompetitionToBuild: cq.withCompetitionToBuild.Clone(), // clone intermediate query. - sql: cq.sql.Clone(), - path: cq.path, - unique: cq.unique, + sql: cq.sql.Clone(), + path: cq.path, } } // WithCompetitionToDNS tells the query-builder to eager-load the nodes that are connected to // the "CompetitionToDNS" edge. The optional arguments are used to configure the query builder of the edge. 
func (cq *CompetitionQuery) WithCompetitionToDNS(opts ...func(*DNSQuery)) *CompetitionQuery { - query := &DNSQuery{config: cq.config} + query := (&DNSClient{config: cq.config}).Query() for _, opt := range opts { opt(query) } @@ -341,7 +351,7 @@ func (cq *CompetitionQuery) WithCompetitionToDNS(opts ...func(*DNSQuery)) *Compe // WithCompetitionToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "CompetitionToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (cq *CompetitionQuery) WithCompetitionToEnvironment(opts ...func(*EnvironmentQuery)) *CompetitionQuery { - query := &EnvironmentQuery{config: cq.config} + query := (&EnvironmentClient{config: cq.config}).Query() for _, opt := range opts { opt(query) } @@ -352,7 +362,7 @@ func (cq *CompetitionQuery) WithCompetitionToEnvironment(opts ...func(*Environme // WithCompetitionToBuild tells the query-builder to eager-load the nodes that are connected to // the "CompetitionToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (cq *CompetitionQuery) WithCompetitionToBuild(opts ...func(*BuildQuery)) *CompetitionQuery { - query := &BuildQuery{config: cq.config} + query := (&BuildClient{config: cq.config}).Query() for _, opt := range opts { opt(query) } @@ -366,25 +376,21 @@ func (cq *CompetitionQuery) WithCompetitionToBuild(opts ...func(*BuildQuery)) *C // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Competition.Query(). -// GroupBy(competition.FieldHclID). +// GroupBy(competition.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (cq *CompetitionQuery) GroupBy(field string, fields ...string) *CompetitionGroupBy { - group := &CompetitionGroupBy{config: cq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := cq.prepareQuery(ctx); err != nil { - return nil, err - } - return cq.sqlQuery(ctx), nil - } - return group + cq.ctx.Fields = append([]string{field}, fields...) + grbuild := &CompetitionGroupBy{build: cq} + grbuild.flds = &cq.ctx.Fields + grbuild.label = competition.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -393,20 +399,37 @@ func (cq *CompetitionQuery) GroupBy(field string, fields ...string) *Competition // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Competition.Query(). -// Select(competition.FieldHclID). +// Select(competition.FieldHCLID). // Scan(ctx, &v) -// func (cq *CompetitionQuery) Select(fields ...string) *CompetitionSelect { - cq.fields = append(cq.fields, fields...) - return &CompetitionSelect{CompetitionQuery: cq} + cq.ctx.Fields = append(cq.ctx.Fields, fields...) + sbuild := &CompetitionSelect{CompetitionQuery: cq} + sbuild.label = competition.Label + sbuild.flds, sbuild.scan = &cq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a CompetitionSelect configured with the given aggregations. +func (cq *CompetitionQuery) Aggregate(fns ...AggregateFunc) *CompetitionSelect { + return cq.Select().Aggregate(fns...) 
} func (cq *CompetitionQuery) prepareQuery(ctx context.Context) error { - for _, f := range cq.fields { + for _, inter := range cq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cq); err != nil { + return err + } + } + } + for _, f := range cq.ctx.Fields { if !competition.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -421,7 +444,7 @@ func (cq *CompetitionQuery) prepareQuery(ctx context.Context) error { return nil } -func (cq *CompetitionQuery) sqlAll(ctx context.Context) ([]*Competition, error) { +func (cq *CompetitionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Competition, error) { var ( nodes = []*Competition{} withFKs = cq.withFKs @@ -438,186 +461,215 @@ func (cq *CompetitionQuery) sqlAll(ctx context.Context) ([]*Competition, error) if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, competition.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Competition).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Competition{config: cq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(cq.modifiers) > 0 { + _spec.Modifiers = cq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, cq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := cq.withCompetitionToDNS; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Competition, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.CompetitionToDNS = []*DNS{} + if err := cq.loadCompetitionToDNS(ctx, query, nodes, + func(n *Competition) { n.Edges.CompetitionToDNS = []*DNS{} }, + func(n *Competition, e *DNS) { n.Edges.CompetitionToDNS = append(n.Edges.CompetitionToDNS, e) }); err != nil { + return nil, err } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Competition) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: competition.CompetitionToDNSTable, - Columns: competition.CompetitionToDNSPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(competition.CompetitionToDNSPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + if query := cq.withCompetitionToEnvironment; query 
!= nil { + if err := cq.loadCompetitionToEnvironment(ctx, query, nodes, nil, + func(n *Competition, e *Environment) { n.Edges.CompetitionToEnvironment = e }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, cq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "CompetitionToDNS": %w`, err) + } + if query := cq.withCompetitionToBuild; query != nil { + if err := cq.loadCompetitionToBuild(ctx, query, nodes, + func(n *Competition) { n.Edges.CompetitionToBuild = []*Build{} }, + func(n *Competition, e *Build) { n.Edges.CompetitionToBuild = append(n.Edges.CompetitionToBuild, e) }); err != nil { + return nil, err } - query.Where(dns.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range cq.withNamedCompetitionToDNS { + if err := cq.loadCompetitionToDNS(ctx, query, nodes, + func(n *Competition) { n.appendNamedCompetitionToDNS(name) }, + func(n *Competition, e *DNS) { n.appendNamedCompetitionToDNS(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "CompetitionToDNS" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.CompetitionToDNS = append(nodes[i].Edges.CompetitionToDNS, n) - } + } + for name, query := range cq.withNamedCompetitionToBuild { + if err := cq.loadCompetitionToBuild(ctx, query, nodes, + func(n *Competition) { n.appendNamedCompetitionToBuild(name) }, + func(n *Competition, e *Build) { n.appendNamedCompetitionToBuild(name, e) }); err != nil { + return nil, err + } + } + for i := range cq.loadTotal { + if err := cq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := cq.withCompetitionToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Competition) - for i := range nodes { - if nodes[i].environment_environment_to_competition == nil { - continue +func (cq *CompetitionQuery) loadCompetitionToDNS(ctx context.Context, query *DNSQuery, nodes []*Competition, init func(*Competition), assign func(*Competition, *DNS)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Competition) + nids := make(map[uuid.UUID]map[*Competition]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(competition.CompetitionToDNSTable) + s.Join(joinT).On(s.C(dns.FieldID), joinT.C(competition.CompetitionToDNSPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(competition.CompetitionToDNSPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(competition.CompetitionToDNSPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - fk := *nodes[i].environment_environment_to_competition - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Competition]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } - nodeids[fk] = append(nodeids[fk], nodes[i]) + }) + }) + neighbors, err := withInterceptors[[]*DNS](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "CompetitionToDNS" node returned %v`, n.ID) } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + for kn := range nodes { + assign(kn, n) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_competition" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.CompetitionToEnvironment = n - } + } + return nil +} +func (cq *CompetitionQuery) loadCompetitionToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Competition, init func(*Competition), assign func(*Competition, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Competition) + for i := range nodes { + if nodes[i].environment_environment_to_competition == nil { + continue + } + fk := *nodes[i].environment_environment_to_competition + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := cq.withCompetitionToBuild; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Competition) + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_competition" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.CompetitionToBuild = []*Build{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.Build(func(s *sql.Selector) { - s.Where(sql.InValues(competition.CompetitionToBuildColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (cq *CompetitionQuery) loadCompetitionToBuild(ctx context.Context, query *BuildQuery, nodes []*Competition, init func(*Competition), assign func(*Competition, *Build)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Competition) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - for _, n := range 
neighbors { - fk := n.build_build_to_competition - if fk == nil { - return nil, fmt.Errorf(`foreign-key "build_build_to_competition" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_competition" returned %v for node %v`, *fk, n.ID) - } - node.Edges.CompetitionToBuild = append(node.Edges.CompetitionToBuild, n) + } + query.withFKs = true + query.Where(predicate.Build(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(competition.CompetitionToBuildColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.build_build_to_competition + if fk == nil { + return fmt.Errorf(`foreign-key "build_build_to_competition" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "build_build_to_competition" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil } func (cq *CompetitionQuery) sqlCount(ctx context.Context) (int, error) { _spec := cq.querySpec() - _spec.Node.Columns = cq.fields - if len(cq.fields) > 0 { - _spec.Unique = cq.unique != nil && *cq.unique + if len(cq.modifiers) > 0 { + _spec.Modifiers = cq.modifiers } - return sqlgraph.CountNodes(ctx, cq.driver, _spec) -} - -func (cq *CompetitionQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := cq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = cq.ctx.Fields + if len(cq.ctx.Fields) > 0 { + _spec.Unique = cq.ctx.Unique != nil && *cq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, cq.driver, _spec) } func (cq *CompetitionQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: competition.Table, - Columns: competition.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, - }, - From: cq.sql, - Unique: true, - } - if unique := cq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(competition.Table, competition.Columns, sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID)) + _spec.From = cq.sql + if unique := cq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if cq.path != nil { + _spec.Unique = true } - if fields := cq.fields; len(fields) > 0 { + if fields := cq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, competition.FieldID) for i := range fields { @@ -633,10 +685,10 @@ func (cq *CompetitionQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := cq.limit; limit != nil { + if limit := cq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := cq.offset; offset != nil { + if offset := cq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := cq.order; len(ps) > 0 { @@ -652,7 +704,7 @@ func (cq *CompetitionQuery) querySpec() *sqlgraph.QuerySpec { func (cq *CompetitionQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(cq.driver.Dialect()) t1 := builder.Table(competition.Table) - columns := cq.fields + columns := cq.ctx.Fields if len(columns) == 0 { columns = competition.Columns } @@ -661,7 +713,7 @@ func (cq *CompetitionQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = cq.sql selector.Select(selector.Columns(columns...)...) 
} - if cq.unique != nil && *cq.unique { + if cq.ctx.Unique != nil && *cq.ctx.Unique { selector.Distinct() } for _, p := range cq.predicates { @@ -670,498 +722,128 @@ func (cq *CompetitionQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range cq.order { p(selector) } - if offset := cq.offset; offset != nil { + if offset := cq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := cq.limit; limit != nil { + if limit := cq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// CompetitionGroupBy is the group-by builder for Competition entities. -type CompetitionGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (cgb *CompetitionGroupBy) Aggregate(fns ...AggregateFunc) *CompetitionGroupBy { - cgb.fns = append(cgb.fns, fns...) - return cgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (cgb *CompetitionGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := cgb.path(ctx) - if err != nil { - return err - } - cgb.sql = query - return cgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (cgb *CompetitionGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := cgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CompetitionGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (cgb *CompetitionGroupBy) StringsX(ctx context.Context) []string { - v, err := cgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = cgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (cgb *CompetitionGroupBy) StringX(ctx context.Context) string { - v, err := cgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CompetitionGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. 
-func (cgb *CompetitionGroupBy) IntsX(ctx context.Context) []int { - v, err := cgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = cgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (cgb *CompetitionGroupBy) IntX(ctx context.Context) int { - v, err := cgb.Int(ctx) - if err != nil { - panic(err) +// WithNamedCompetitionToDNS tells the query-builder to eager-load the nodes that are connected to the "CompetitionToDNS" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (cq *CompetitionQuery) WithNamedCompetitionToDNS(name string, opts ...func(*DNSQuery)) *CompetitionQuery { + query := (&DNSClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CompetitionGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err + if cq.withNamedCompetitionToDNS == nil { + cq.withNamedCompetitionToDNS = make(map[string]*DNSQuery) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (cgb *CompetitionGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := cgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v + cq.withNamedCompetitionToDNS[name] = query + return cq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = cgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedCompetitionToBuild tells the query-builder to eager-load the nodes that are connected to the "CompetitionToBuild" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (cq *CompetitionQuery) WithNamedCompetitionToBuild(name string, opts ...func(*BuildQuery)) *CompetitionQuery { + query := (&BuildClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (cgb *CompetitionGroupBy) Float64X(ctx context.Context) float64 { - v, err := cgb.Float64(ctx) - if err != nil { - panic(err) + if cq.withNamedCompetitionToBuild == nil { + cq.withNamedCompetitionToBuild = make(map[string]*BuildQuery) } - return v + cq.withNamedCompetitionToBuild[name] = query + return cq } -// Bools returns list of bools from group-by. 
-// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(cgb.fields) > 1 { - return nil, errors.New("ent: CompetitionGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := cgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// CompetitionGroupBy is the group-by builder for Competition entities. +type CompetitionGroupBy struct { + selector + build *CompetitionQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (cgb *CompetitionGroupBy) BoolsX(ctx context.Context) []bool { - v, err := cgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (cgb *CompetitionGroupBy) Aggregate(fns ...AggregateFunc) *CompetitionGroupBy { + cgb.fns = append(cgb.fns, fns...) + return cgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (cgb *CompetitionGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = cgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (cgb *CompetitionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cgb.build.ctx, "GroupBy") + if err := cgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*CompetitionQuery, *CompetitionGroupBy](ctx, cgb.build, cgb, cgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (cgb *CompetitionGroupBy) BoolX(ctx context.Context) bool { - v, err := cgb.Bool(ctx) - if err != nil { - panic(err) +func (cgb *CompetitionGroupBy) sqlScan(ctx context.Context, root *CompetitionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cgb.fns)) + for _, fn := range cgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (cgb *CompetitionGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range cgb.fields { - if !competition.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cgb.flds)+len(cgb.fns)) + for _, f := range *cgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := cgb.sqlQuery() + selector.GroupBy(selector.Columns(*cgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := cgb.driver.Query(ctx, query, args, rows); err != nil { + if err := cgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (cgb *CompetitionGroupBy) sqlQuery() *sql.Selector { - selector := cgb.sql.Select() - aggregation := make([]string, 0, len(cgb.fns)) - for _, fn := range cgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(cgb.fields)+len(cgb.fns)) - for _, f := range cgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(cgb.fields...)...) -} - // CompetitionSelect is the builder for selecting fields of Competition entities. type CompetitionSelect struct { *CompetitionQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cs *CompetitionSelect) Aggregate(fns ...AggregateFunc) *CompetitionSelect { + cs.fns = append(cs.fns, fns...) + return cs } // Scan applies the selector query and scans the result into the given value. -func (cs *CompetitionSelect) Scan(ctx context.Context, v interface{}) error { +func (cs *CompetitionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cs.ctx, "Select") if err := cs.prepareQuery(ctx); err != nil { return err } - cs.sql = cs.CompetitionQuery.sqlQuery(ctx) - return cs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (cs *CompetitionSelect) ScanX(ctx context.Context, v interface{}) { - if err := cs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (cs *CompetitionSelect) Strings(ctx context.Context) ([]string, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CompetitionSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (cs *CompetitionSelect) StringsX(ctx context.Context) []string { - v, err := cs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (cs *CompetitionSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = cs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (cs *CompetitionSelect) StringX(ctx context.Context) string { - v, err := cs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
-func (cs *CompetitionSelect) Ints(ctx context.Context) ([]int, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CompetitionSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*CompetitionQuery, *CompetitionSelect](ctx, cs.CompetitionQuery, cs, cs.inters, v) } -// IntsX is like Ints, but panics if an error occurs. -func (cs *CompetitionSelect) IntsX(ctx context.Context) []int { - v, err := cs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (cs *CompetitionSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = cs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (cs *CompetitionSelect) IntX(ctx context.Context) int { - v, err := cs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (cs *CompetitionSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CompetitionSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (cs *CompetitionSelect) Float64sX(ctx context.Context) []float64 { - v, err := cs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (cs *CompetitionSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = cs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (cs *CompetitionSelect) Float64X(ctx context.Context) float64 { - v, err := cs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (cs *CompetitionSelect) Bools(ctx context.Context) ([]bool, error) { - if len(cs.fields) > 1 { - return nil, errors.New("ent: CompetitionSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := cs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (cs *CompetitionSelect) BoolsX(ctx context.Context) []bool { - v, err := cs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (cs *CompetitionSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = cs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{competition.Label} - default: - err = fmt.Errorf("ent: CompetitionSelect.Bools returned %d results when one was expected", len(v)) +func (cs *CompetitionSelect) sqlScan(ctx context.Context, root *CompetitionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cs.fns)) + for _, fn := range cs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (cs *CompetitionSelect) BoolX(ctx context.Context) bool { - v, err := cs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*cs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (cs *CompetitionSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := cs.sql.Query() + query, args := selector.Query() if err := cs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/competition_update.go b/ent/competition_update.go index c2807c68..cb33a91b 100755 --- a/ent/competition_update.go +++ b/ent/competition_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -31,9 +31,17 @@ func (cu *CompetitionUpdate) Where(ps ...predicate.Competition) *CompetitionUpda return cu } -// SetHclID sets the "hcl_id" field. -func (cu *CompetitionUpdate) SetHclID(s string) *CompetitionUpdate { - cu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (cu *CompetitionUpdate) SetHCLID(s string) *CompetitionUpdate { + cu.mutation.SetHCLID(s) + return cu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (cu *CompetitionUpdate) SetNillableHCLID(s *string) *CompetitionUpdate { + if s != nil { + cu.SetHCLID(*s) + } return cu } @@ -43,6 +51,14 @@ func (cu *CompetitionUpdate) SetRootPassword(s string) *CompetitionUpdate { return cu } +// SetNillableRootPassword sets the "root_password" field if the given value is not nil. +func (cu *CompetitionUpdate) SetNillableRootPassword(s *string) *CompetitionUpdate { + if s != nil { + cu.SetRootPassword(*s) + } + return cu +} + // SetConfig sets the "config" field. func (cu *CompetitionUpdate) SetConfig(m map[string]string) *CompetitionUpdate { cu.mutation.SetConfig(m) @@ -159,34 +175,7 @@ func (cu *CompetitionUpdate) RemoveCompetitionToBuild(b ...*Build) *CompetitionU // Save executes the query and returns the number of nodes affected by the update operation. 
func (cu *CompetitionUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(cu.hooks) == 0 { - affected, err = cu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CompetitionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - cu.mutation = mutation - affected, err = cu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(cu.hooks) - 1; i >= 0; i-- { - if cu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, cu.sqlSave, cu.mutation, cu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -212,16 +201,7 @@ func (cu *CompetitionUpdate) ExecX(ctx context.Context) { } func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: competition.Table, - Columns: competition.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(competition.Table, competition.Columns, sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID)) if ps := cu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -229,33 +209,17 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := cu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: competition.FieldHclID, - }) + if value, ok := cu.mutation.HCLID(); ok { + _spec.SetField(competition.FieldHCLID, field.TypeString, value) } if value, ok := cu.mutation.RootPassword(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: competition.FieldRootPassword, - }) + _spec.SetField(competition.FieldRootPassword, field.TypeString, value) } if value, ok := cu.mutation.Config(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: competition.FieldConfig, - }) + _spec.SetField(competition.FieldConfig, field.TypeJSON, value) } if value, ok := cu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: competition.FieldTags, - }) + _spec.SetField(competition.FieldTags, field.TypeJSON, value) } if cu.mutation.CompetitionToDNSCleared() { edge := &sqlgraph.EdgeSpec{ @@ -265,10 +229,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -281,10 +242,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes 
{ @@ -300,10 +258,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -319,10 +274,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{competition.CompetitionToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -335,10 +287,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{competition.CompetitionToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -354,10 +303,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -370,10 +316,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -389,10 +332,7 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -404,10 +344,11 @@ func (cu *CompetitionUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{competition.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + cu.mutation.done = true return n, nil } @@ -419,9 +360,17 @@ type CompetitionUpdateOne struct { mutation *CompetitionMutation } -// SetHclID sets the "hcl_id" field. -func (cuo *CompetitionUpdateOne) SetHclID(s string) *CompetitionUpdateOne { - cuo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (cuo *CompetitionUpdateOne) SetHCLID(s string) *CompetitionUpdateOne { + cuo.mutation.SetHCLID(s) + return cuo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. 
+func (cuo *CompetitionUpdateOne) SetNillableHCLID(s *string) *CompetitionUpdateOne { + if s != nil { + cuo.SetHCLID(*s) + } return cuo } @@ -431,6 +380,14 @@ func (cuo *CompetitionUpdateOne) SetRootPassword(s string) *CompetitionUpdateOne return cuo } +// SetNillableRootPassword sets the "root_password" field if the given value is not nil. +func (cuo *CompetitionUpdateOne) SetNillableRootPassword(s *string) *CompetitionUpdateOne { + if s != nil { + cuo.SetRootPassword(*s) + } + return cuo +} + // SetConfig sets the "config" field. func (cuo *CompetitionUpdateOne) SetConfig(m map[string]string) *CompetitionUpdateOne { cuo.mutation.SetConfig(m) @@ -545,6 +502,12 @@ func (cuo *CompetitionUpdateOne) RemoveCompetitionToBuild(b ...*Build) *Competit return cuo.RemoveCompetitionToBuildIDs(ids...) } +// Where appends a list predicates to the CompetitionUpdate builder. +func (cuo *CompetitionUpdateOne) Where(ps ...predicate.Competition) *CompetitionUpdateOne { + cuo.mutation.Where(ps...) + return cuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (cuo *CompetitionUpdateOne) Select(field string, fields ...string) *CompetitionUpdateOne { @@ -554,34 +517,7 @@ func (cuo *CompetitionUpdateOne) Select(field string, fields ...string) *Competi // Save executes the query and returns the updated Competition entity. func (cuo *CompetitionUpdateOne) Save(ctx context.Context) (*Competition, error) { - var ( - err error - node *Competition - ) - if len(cuo.hooks) == 0 { - node, err = cuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*CompetitionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - cuo.mutation = mutation - node, err = cuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(cuo.hooks) - 1; i >= 0; i-- { - if cuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, cuo.sqlSave, cuo.mutation, cuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
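// Usage sketch, not part of the generated code in this diff: the new
// CompetitionUpdateOne surface introduced above. It assumes the usual generated
// entrypoint client.Competition.UpdateOneID(id) and a field predicate such as
// competition.HCLIDEQ, neither of which appears verbatim in this hunk. The
// nillable setters accept pointer-typed input (for example an optional patch
// field) and leave the column untouched when the pointer is nil, while Where on
// the update-one builder adds an extra guard before the single-row update runs.
//
//	var newPassword *string // nil means "do not change root_password"
//	comp, err := client.Competition.
//		UpdateOneID(id).
//		Where(competition.HCLIDEQ("example-competition")).
//		SetNillableRootPassword(newPassword).
//		Save(ctx)
//	if err != nil {
//		return err
//	}
//	_ = comp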
@@ -607,16 +543,7 @@ func (cuo *CompetitionUpdateOne) ExecX(ctx context.Context) { } func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competition, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: competition.Table, - Columns: competition.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(competition.Table, competition.Columns, sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID)) id, ok := cuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Competition.id" for update`)} @@ -641,33 +568,17 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio } } } - if value, ok := cuo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: competition.FieldHclID, - }) + if value, ok := cuo.mutation.HCLID(); ok { + _spec.SetField(competition.FieldHCLID, field.TypeString, value) } if value, ok := cuo.mutation.RootPassword(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: competition.FieldRootPassword, - }) + _spec.SetField(competition.FieldRootPassword, field.TypeString, value) } if value, ok := cuo.mutation.Config(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: competition.FieldConfig, - }) + _spec.SetField(competition.FieldConfig, field.TypeJSON, value) } if value, ok := cuo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: competition.FieldTags, - }) + _spec.SetField(competition.FieldTags, field.TypeJSON, value) } if cuo.mutation.CompetitionToDNSCleared() { edge := &sqlgraph.EdgeSpec{ @@ -677,10 +588,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -693,10 +601,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -712,10 +617,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: competition.CompetitionToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -731,10 +633,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: []string{competition.CompetitionToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -747,10 +646,7 @@ func (cuo 
*CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: []string{competition.CompetitionToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -766,10 +662,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: []string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -782,10 +675,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: []string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -801,10 +691,7 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio Columns: []string{competition.CompetitionToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -819,9 +706,10 @@ func (cuo *CompetitionUpdateOne) sqlSave(ctx context.Context) (_node *Competitio if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{competition.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + cuo.mutation.done = true return _node, nil } diff --git a/ent/config.go b/ent/config.go deleted file mode 100755 index b11fd14f..00000000 --- a/ent/config.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by entc, DO NOT EDIT. - -package ent - -import ( - "entgo.io/ent" - "entgo.io/ent/dialect" -) - -// Option function to configure the client. -type Option func(*config) - -// Config is the configuration for the client and its builder. -type config struct { - // driver used for executing database requests. - driver dialect.Driver - // debug enable a debug logging. - debug bool - // log used for logging on debug mode. - log func(...interface{}) - // hooks to execute on mutations. - hooks *hooks -} - -// hooks per client, for fast access. 
-type hooks struct { - AdhocPlan []ent.Hook - AgentStatus []ent.Hook - AgentTask []ent.Hook - Ansible []ent.Hook - AuthUser []ent.Hook - Build []ent.Hook - BuildCommit []ent.Hook - Command []ent.Hook - Competition []ent.Hook - DNS []ent.Hook - DNSRecord []ent.Hook - Disk []ent.Hook - Environment []ent.Hook - FileDelete []ent.Hook - FileDownload []ent.Hook - FileExtract []ent.Hook - Finding []ent.Hook - GinFileMiddleware []ent.Hook - Host []ent.Hook - HostDependency []ent.Hook - Identity []ent.Hook - IncludedNetwork []ent.Hook - Network []ent.Hook - Plan []ent.Hook - PlanDiff []ent.Hook - ProvisionedHost []ent.Hook - ProvisionedNetwork []ent.Hook - ProvisioningStep []ent.Hook - RepoCommit []ent.Hook - Repository []ent.Hook - Script []ent.Hook - ServerTask []ent.Hook - Status []ent.Hook - Tag []ent.Hook - Team []ent.Hook - Token []ent.Hook - User []ent.Hook -} - -// Options applies the options on the config object. -func (c *config) options(opts ...Option) { - for _, opt := range opts { - opt(c) - } - if c.debug { - c.driver = dialect.Debug(c.driver, c.log) - } -} - -// Debug enables debug logging on the ent.Driver. -func Debug() Option { - return func(c *config) { - c.debug = true - } -} - -// Log sets the logging function for debug mode. -func Log(fn func(...interface{})) Option { - return func(c *config) { - c.log = fn - } -} - -// Driver configures the client driver. -func Driver(driver dialect.Driver) Option { - return func(c *config) { - c.driver = driver - } -} diff --git a/ent/context.go b/ent/context.go deleted file mode 100755 index 08407261..00000000 --- a/ent/context.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by entc, DO NOT EDIT. - -package ent - -import ( - "context" -) - -type clientCtxKey struct{} - -// FromContext returns a Client stored inside a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Client { - c, _ := ctx.Value(clientCtxKey{}).(*Client) - return c -} - -// NewContext returns a new context with the given Client attached. -func NewContext(parent context.Context, c *Client) context.Context { - return context.WithValue(parent, clientCtxKey{}, c) -} - -type txCtxKey struct{} - -// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. -func TxFromContext(ctx context.Context) *Tx { - tx, _ := ctx.Value(txCtxKey{}).(*Tx) - return tx -} - -// NewTxContext returns a new context with the given Tx attached. -func NewTxContext(parent context.Context, tx *Tx) context.Context { - return context.WithValue(parent, txCtxKey{}, tx) -} diff --git a/ent/disk.go b/ent/disk.go index c04bdb88..115ee472 100755 --- a/ent/disk.go +++ b/ent/disk.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/disk" "github.com/gen0cide/laforge/ent/host" @@ -23,11 +24,13 @@ type Disk struct { // The values are being populated by the DiskQuery when eager-loading is set. Edges DiskEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // DiskToHost holds the value of the DiskToHost edge. HCLDiskToHost *Host `json:"DiskToHost,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ host_host_to_disk *uuid.UUID + selectValues sql.SelectValues } // DiskEdges holds the relations/edges for other nodes in the graph. 
@@ -37,6 +40,8 @@ type DiskEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. + totalCount [1]map[string]int } // DiskToHostOrErr returns the DiskToHost value or an error if the edge @@ -44,8 +49,7 @@ type DiskEdges struct { func (e DiskEdges) DiskToHostOrErr() (*Host, error) { if e.loadedTypes[0] { if e.DiskToHost == nil { - // The edge DiskToHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: host.Label} } return e.DiskToHost, nil @@ -54,8 +58,8 @@ func (e DiskEdges) DiskToHostOrErr() (*Host, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Disk) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Disk) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case disk.FieldSize: @@ -65,7 +69,7 @@ func (*Disk) scanValues(columns []string) ([]interface{}, error) { case disk.ForeignKeys[0]: // host_host_to_disk values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Disk", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -73,7 +77,7 @@ func (*Disk) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Disk fields. -func (d *Disk) assignValues(columns []string, values []interface{}) error { +func (d *Disk) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -98,31 +102,39 @@ func (d *Disk) assignValues(columns []string, values []interface{}) error { d.host_host_to_disk = new(uuid.UUID) *d.host_host_to_disk = *value.S.(*uuid.UUID) } + default: + d.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Disk. +// This includes values selected through modifiers, order, etc. +func (d *Disk) Value(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + // QueryDiskToHost queries the "DiskToHost" edge of the Disk entity. func (d *Disk) QueryDiskToHost() *HostQuery { - return (&DiskClient{config: d.config}).QueryDiskToHost(d) + return NewDiskClient(d.config).QueryDiskToHost(d) } // Update returns a builder for updating this Disk. // Note that you need to call Disk.Unwrap() before calling this method if this Disk // was returned from a transaction, and the transaction was committed or rolled back. func (d *Disk) Update() *DiskUpdateOne { - return (&DiskClient{config: d.config}).UpdateOne(d) + return NewDiskClient(d.config).UpdateOne(d) } // Unwrap unwraps the Disk entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (d *Disk) Unwrap() *Disk { - tx, ok := d.config.driver.(*txDriver) + _tx, ok := d.config.driver.(*txDriver) if !ok { panic("ent: Disk is not a transactional entity") } - d.config.driver = tx.drv + d.config.driver = _tx.drv return d } @@ -130,8 +142,8 @@ func (d *Disk) Unwrap() *Disk { func (d *Disk) String() string { var builder strings.Builder builder.WriteString("Disk(") - builder.WriteString(fmt.Sprintf("id=%v", d.ID)) - builder.WriteString(", size=") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + builder.WriteString("size=") builder.WriteString(fmt.Sprintf("%v", d.Size)) builder.WriteByte(')') return builder.String() @@ -139,9 +151,3 @@ func (d *Disk) String() string { // Disks is a parsable slice of Disk. type Disks []*Disk - -func (d Disks) config(cfg config) { - for _i := range d { - d[_i].config = cfg - } -} diff --git a/ent/disk/disk.go b/ent/disk/disk.go index 2b3da4fe..21a2a996 100755 --- a/ent/disk/disk.go +++ b/ent/disk/disk.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package disk import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -59,3 +61,30 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Disk queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// BySize orders the results by the size field. +func BySize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSize, opts...).ToFunc() +} + +// ByDiskToHostField orders the results by DiskToHost field. +func ByDiskToHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDiskToHostStep(), sql.OrderByField(field, opts...)) + } +} +func newDiskToHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DiskToHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, DiskToHostTable, DiskToHostColumn), + ) +} diff --git a/ent/disk/where.go b/ent/disk/where.go index 69fa6379..840da289 100755 --- a/ent/disk/where.go +++ b/ent/disk/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package disk @@ -11,168 +11,92 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Disk(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Disk(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Disk(sql.FieldLTE(FieldID, id)) } // Size applies equality check predicate on the "size" field. It's identical to SizeEQ. func Size(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldEQ(FieldSize, v)) } // SizeEQ applies the EQ predicate on the "size" field. func SizeEQ(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldEQ(FieldSize, v)) } // SizeNEQ applies the NEQ predicate on the "size" field. func SizeNEQ(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldNEQ(FieldSize, v)) } // SizeIn applies the In predicate on the "size" field. func SizeIn(vs ...int) predicate.Disk { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Disk(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSize), v...)) - }) + return predicate.Disk(sql.FieldIn(FieldSize, vs...)) } // SizeNotIn applies the NotIn predicate on the "size" field. func SizeNotIn(vs ...int) predicate.Disk { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Disk(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSize), v...)) - }) + return predicate.Disk(sql.FieldNotIn(FieldSize, vs...)) } // SizeGT applies the GT predicate on the "size" field. 
func SizeGT(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldGT(FieldSize, v)) } // SizeGTE applies the GTE predicate on the "size" field. func SizeGTE(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldGTE(FieldSize, v)) } // SizeLT applies the LT predicate on the "size" field. func SizeLT(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldLT(FieldSize, v)) } // SizeLTE applies the LTE predicate on the "size" field. func SizeLTE(v int) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSize), v)) - }) + return predicate.Disk(sql.FieldLTE(FieldSize, v)) } // HasDiskToHost applies the HasEdge predicate on the "DiskToHost" edge. @@ -180,7 +104,6 @@ func HasDiskToHost() predicate.Disk { return predicate.Disk(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DiskToHostTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, DiskToHostTable, DiskToHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -190,11 +113,7 @@ func HasDiskToHost() predicate.Disk { // HasDiskToHostWith applies the HasEdge predicate on the "DiskToHost" edge with a given conditions (other predicates). func HasDiskToHostWith(preds ...predicate.Host) predicate.Disk { return predicate.Disk(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DiskToHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, DiskToHostTable, DiskToHostColumn), - ) + step := newDiskToHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -205,32 +124,15 @@ func HasDiskToHostWith(preds ...predicate.Host) predicate.Disk { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Disk) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Disk(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Disk) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Disk(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Disk) predicate.Disk { - return predicate.Disk(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Disk(sql.NotPredicates(p)) } diff --git a/ent/disk_create.go b/ent/disk_create.go index 80410839..79518920 100755 --- a/ent/disk_create.go +++ b/ent/disk_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -67,44 +67,8 @@ func (dc *DiskCreate) Mutation() *DiskMutation { // Save creates the Disk in the database. 
func (dc *DiskCreate) Save(ctx context.Context) (*Disk, error) { - var ( - err error - node *Disk - ) dc.defaults() - if len(dc.hooks) == 0 { - if err = dc.check(); err != nil { - return nil, err - } - node, err = dc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DiskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = dc.check(); err != nil { - return nil, err - } - dc.mutation = mutation - if node, err = dc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(dc.hooks) - 1; i >= 0; i-- { - if dc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -151,10 +115,13 @@ func (dc *DiskCreate) check() error { } func (dc *DiskCreate) sqlSave(ctx context.Context) (*Disk, error) { + if err := dc.check(); err != nil { + return nil, err + } _node, _spec := dc.createSpec() if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -165,30 +132,22 @@ func (dc *DiskCreate) sqlSave(ctx context.Context) (*Disk, error) { return nil, err } } + dc.mutation.id = &_node.ID + dc.mutation.done = true return _node, nil } func (dc *DiskCreate) createSpec() (*Disk, *sqlgraph.CreateSpec) { var ( _node = &Disk{config: dc.config} - _spec = &sqlgraph.CreateSpec{ - Table: disk.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(disk.Table, sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID)) ) if id, ok := dc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := dc.mutation.Size(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: disk.FieldSize, - }) + _spec.SetField(disk.FieldSize, field.TypeInt, value) _node.Size = value } if nodes := dc.mutation.DiskToHostIDs(); len(nodes) > 0 { @@ -199,10 +158,7 @@ func (dc *DiskCreate) createSpec() (*Disk, *sqlgraph.CreateSpec) { Columns: []string{disk.DiskToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -217,11 +173,15 @@ func (dc *DiskCreate) createSpec() (*Disk, *sqlgraph.CreateSpec) { // DiskCreateBulk is the builder for creating many Disk entities in bulk. type DiskCreateBulk struct { config + err error builders []*DiskCreate } // Save creates the Disk entities in the database. 
func (dcb *DiskCreateBulk) Save(ctx context.Context) ([]*Disk, error) { + if dcb.err != nil { + return nil, dcb.err + } specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) nodes := make([]*Disk, len(dcb.builders)) mutators := make([]Mutator, len(dcb.builders)) @@ -238,8 +198,8 @@ func (dcb *DiskCreateBulk) Save(ctx context.Context) ([]*Disk, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) } else { @@ -247,7 +207,7 @@ func (dcb *DiskCreateBulk) Save(ctx context.Context) ([]*Disk, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/disk_delete.go b/ent/disk_delete.go index c39e306d..4234e6f7 100755 --- a/ent/disk_delete.go +++ b/ent/disk_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (dd *DiskDelete) Where(ps ...predicate.Disk) *DiskDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (dd *DiskDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(dd.hooks) == 0 { - affected, err = dd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DiskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - dd.mutation = mutation - affected, err = dd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(dd.hooks) - 1; i >= 0; i-- { - if dd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (dd *DiskDelete) ExecX(ctx context.Context) int { } func (dd *DiskDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: disk.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(disk.Table, sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID)) if ps := dd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (dd *DiskDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + dd.mutation.done = true + return affected, err } // DiskDeleteOne is the builder for deleting a single Disk entity. @@ -92,6 +61,12 @@ type DiskDeleteOne struct { dd *DiskDelete } +// Where appends a list predicates to the DiskDelete builder. +func (ddo *DiskDeleteOne) Where(ps ...predicate.Disk) *DiskDeleteOne { + ddo.dd.mutation.Where(ps...) 
+ return ddo +} + // Exec executes the deletion query. func (ddo *DiskDeleteOne) Exec(ctx context.Context) error { n, err := ddo.dd.Exec(ctx) @@ -107,5 +82,7 @@ func (ddo *DiskDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ddo *DiskDeleteOne) ExecX(ctx context.Context) { - ddo.dd.ExecX(ctx) + if err := ddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/disk_query.go b/ent/disk_query.go index 42352136..e2371535 100755 --- a/ent/disk_query.go +++ b/ent/disk_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // DiskQuery is the builder for querying Disk entities. type DiskQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Disk - // eager-loading edges. + ctx *QueryContext + order []disk.OrderOption + inters []Interceptor + predicates []predicate.Disk withDiskToHost *HostQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Disk) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (dq *DiskQuery) Where(ps ...predicate.Disk) *DiskQuery { return dq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (dq *DiskQuery) Limit(limit int) *DiskQuery { - dq.limit = &limit + dq.ctx.Limit = &limit return dq } -// Offset adds an offset step to the query. +// Offset to start from. func (dq *DiskQuery) Offset(offset int) *DiskQuery { - dq.offset = &offset + dq.ctx.Offset = &offset return dq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (dq *DiskQuery) Unique(unique bool) *DiskQuery { - dq.unique = &unique + dq.ctx.Unique = &unique return dq } -// Order adds an order step to the query. -func (dq *DiskQuery) Order(o ...OrderFunc) *DiskQuery { +// Order specifies how the records should be ordered. +func (dq *DiskQuery) Order(o ...disk.OrderOption) *DiskQuery { dq.order = append(dq.order, o...) return dq } // QueryDiskToHost chains the current query on the "DiskToHost" edge. func (dq *DiskQuery) QueryDiskToHost() *HostQuery { - query := &HostQuery{config: dq.config} + query := (&HostClient{config: dq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := dq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (dq *DiskQuery) QueryDiskToHost() *HostQuery { // First returns the first Disk entity from the query. // Returns a *NotFoundError when no Disk was found. func (dq *DiskQuery) First(ctx context.Context) (*Disk, error) { - nodes, err := dq.Limit(1).All(ctx) + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (dq *DiskQuery) FirstX(ctx context.Context) *Disk { // Returns a *NotFoundError when no Disk ID was found. 
func (dq *DiskQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = dq.Limit(1).IDs(ctx); err != nil { + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (dq *DiskQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Disk entity is found. // Returns a *NotFoundError when no Disk entities are found. func (dq *DiskQuery) Only(ctx context.Context) (*Disk, error) { - nodes, err := dq.Limit(2).All(ctx) + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (dq *DiskQuery) OnlyX(ctx context.Context) *Disk { // Returns a *NotFoundError when no entities are found. func (dq *DiskQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = dq.Limit(2).IDs(ctx); err != nil { + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (dq *DiskQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Disks. func (dq *DiskQuery) All(ctx context.Context) ([]*Disk, error) { + ctx = setContextOp(ctx, dq.ctx, "All") if err := dq.prepareQuery(ctx); err != nil { return nil, err } - return dq.sqlAll(ctx) + qr := querierAll[[]*Disk, *DiskQuery]() + return withInterceptors[[]*Disk](ctx, dq, qr, dq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (dq *DiskQuery) AllX(ctx context.Context) []*Disk { } // IDs executes the query and returns a list of Disk IDs. -func (dq *DiskQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := dq.Select(disk.FieldID).Scan(ctx, &ids); err != nil { +func (dq *DiskQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if dq.ctx.Unique == nil && dq.path != nil { + dq.Unique(true) + } + ctx = setContextOp(ctx, dq.ctx, "IDs") + if err = dq.Select(disk.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (dq *DiskQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (dq *DiskQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dq.ctx, "Count") if err := dq.prepareQuery(ctx); err != nil { return 0, err } - return dq.sqlCount(ctx) + return withInterceptors[int](ctx, dq, querierCount[*DiskQuery](), dq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (dq *DiskQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (dq *DiskQuery) Exist(ctx context.Context) (bool, error) { - if err := dq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, dq.ctx, "Exist") + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return dq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -264,22 +273,21 @@ func (dq *DiskQuery) Clone() *DiskQuery { } return &DiskQuery{ config: dq.config, - limit: dq.limit, - offset: dq.offset, - order: append([]OrderFunc{}, dq.order...), + ctx: dq.ctx.Clone(), + order: append([]disk.OrderOption{}, dq.order...), + inters: append([]Interceptor{}, dq.inters...), predicates: append([]predicate.Disk{}, dq.predicates...), withDiskToHost: dq.withDiskToHost.Clone(), // clone intermediate query. - sql: dq.sql.Clone(), - path: dq.path, - unique: dq.unique, + sql: dq.sql.Clone(), + path: dq.path, } } // WithDiskToHost tells the query-builder to eager-load the nodes that are connected to // the "DiskToHost" edge. The optional arguments are used to configure the query builder of the edge. func (dq *DiskQuery) WithDiskToHost(opts ...func(*HostQuery)) *DiskQuery { - query := &HostQuery{config: dq.config} + query := (&HostClient{config: dq.config}).Query() for _, opt := range opts { opt(query) } @@ -301,17 +309,13 @@ func (dq *DiskQuery) WithDiskToHost(opts ...func(*HostQuery)) *DiskQuery { // GroupBy(disk.FieldSize). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (dq *DiskQuery) GroupBy(field string, fields ...string) *DiskGroupBy { - group := &DiskGroupBy{config: dq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := dq.prepareQuery(ctx); err != nil { - return nil, err - } - return dq.sqlQuery(ctx), nil - } - return group + dq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DiskGroupBy{build: dq} + grbuild.flds = &dq.ctx.Fields + grbuild.label = disk.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -326,14 +330,31 @@ func (dq *DiskQuery) GroupBy(field string, fields ...string) *DiskGroupBy { // client.Disk.Query(). // Select(disk.FieldSize). // Scan(ctx, &v) -// func (dq *DiskQuery) Select(fields ...string) *DiskSelect { - dq.fields = append(dq.fields, fields...) - return &DiskSelect{DiskQuery: dq} + dq.ctx.Fields = append(dq.ctx.Fields, fields...) + sbuild := &DiskSelect{DiskQuery: dq} + sbuild.label = disk.Label + sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DiskSelect configured with the given aggregations. +func (dq *DiskQuery) Aggregate(fns ...AggregateFunc) *DiskSelect { + return dq.Select().Aggregate(fns...) } func (dq *DiskQuery) prepareQuery(ctx context.Context) error { - for _, f := range dq.fields { + for _, inter := range dq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dq); err != nil { + return err + } + } + } + for _, f := range dq.ctx.Fields { if !disk.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (dq *DiskQuery) prepareQuery(ctx context.Context) error { return nil } -func (dq *DiskQuery) sqlAll(ctx context.Context) ([]*Disk, error) { +func (dq *DiskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Disk, error) { var ( nodes = []*Disk{} withFKs = dq.withFKs @@ -363,92 +384,95 @@ func (dq *DiskQuery) sqlAll(ctx context.Context) ([]*Disk, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, disk.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Disk).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Disk{config: dq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(dq.modifiers) > 0 { + _spec.Modifiers = dq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := dq.withDiskToHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Disk) - for i := range nodes { - if nodes[i].host_host_to_disk == nil { - continue - } - fk := *nodes[i].host_host_to_disk - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(host.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := dq.loadDiskToHost(ctx, query, nodes, nil, + func(n *Disk, e *Host) { n.Edges.DiskToHost = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_host_to_disk" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.DiskToHost = n - } + } + for i := range dq.loadTotal { + if err := dq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (dq *DiskQuery) sqlCount(ctx context.Context) (int, error) { - _spec := dq.querySpec() - _spec.Node.Columns = dq.fields - if len(dq.fields) > 0 { - _spec.Unique = dq.unique != nil && *dq.unique +func (dq *DiskQuery) loadDiskToHost(ctx context.Context, query *HostQuery, nodes []*Disk, init func(*Disk), assign func(*Disk, *Host)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Disk) + for i := range nodes { + if nodes[i].host_host_to_disk == nil { + continue + } + fk := *nodes[i].host_host_to_disk + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, dq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(host.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "host_host_to_disk" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (dq *DiskQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := dq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (dq *DiskQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + if len(dq.modifiers) > 0 { + _spec.Modifiers = dq.modifiers } - return n > 0, nil + _spec.Node.Columns = dq.ctx.Fields + if len(dq.ctx.Fields) > 0 { + _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, dq.driver, _spec) } func (dq *DiskQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: disk.Table, - 
Columns: disk.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, - }, - From: dq.sql, - Unique: true, - } - if unique := dq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(disk.Table, disk.Columns, sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID)) + _spec.From = dq.sql + if unique := dq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if dq.path != nil { + _spec.Unique = true } - if fields := dq.fields; len(fields) > 0 { + if fields := dq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, disk.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (dq *DiskQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := dq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (dq *DiskQuery) querySpec() *sqlgraph.QuerySpec { func (dq *DiskQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(dq.driver.Dialect()) t1 := builder.Table(disk.Table) - columns := dq.fields + columns := dq.ctx.Fields if len(columns) == 0 { columns = disk.Columns } @@ -492,7 +516,7 @@ func (dq *DiskQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = dq.sql selector.Select(selector.Columns(columns...)...) } - if dq.unique != nil && *dq.unique { + if dq.ctx.Unique != nil && *dq.ctx.Unique { selector.Distinct() } for _, p := range dq.predicates { @@ -501,12 +525,12 @@ func (dq *DiskQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range dq.order { p(selector) } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (dq *DiskQuery) sqlQuery(ctx context.Context) *sql.Selector { // DiskGroupBy is the group-by builder for Disk entities. type DiskGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *DiskQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (dgb *DiskGroupBy) Aggregate(fns ...AggregateFunc) *DiskGroupBy { return dgb } -// Scan applies the group-by query and scans the result into the given value. -func (dgb *DiskGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := dgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (dgb *DiskGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy") + if err := dgb.build.prepareQuery(ctx); err != nil { return err } - dgb.sql = query - return dgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (dgb *DiskGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := dgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (dgb *DiskGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DiskGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (dgb *DiskGroupBy) StringsX(ctx context.Context) []string { - v, err := dgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DiskGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = dgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (dgb *DiskGroupBy) StringX(ctx context.Context) string { - v, err := dgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (dgb *DiskGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DiskGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (dgb *DiskGroupBy) IntsX(ctx context.Context) []int { - v, err := dgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DiskGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = dgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*DiskQuery, *DiskGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (dgb *DiskGroupBy) IntX(ctx context.Context) int { - v, err := dgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (dgb *DiskGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DiskGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (dgb *DiskGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := dgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (dgb *DiskGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = dgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (dgb *DiskGroupBy) Float64X(ctx context.Context) float64 { - v, err := dgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (dgb *DiskGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DiskGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (dgb *DiskGroupBy) BoolsX(ctx context.Context) []bool { - v, err := dgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DiskGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = dgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (dgb *DiskGroupBy) BoolX(ctx context.Context) bool { - v, err := dgb.Bool(ctx) - if err != nil { - panic(err) +func (dgb *DiskGroupBy) sqlScan(ctx context.Context, root *DiskQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (dgb *DiskGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range dgb.fields { - if !disk.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns)) + for _, f := range *dgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := dgb.sqlQuery() + selector.GroupBy(selector.Columns(*dgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (dgb *DiskGroupBy) sqlQuery() *sql.Selector { - selector := dgb.sql.Select() - aggregation := make([]string, 0, len(dgb.fns)) - for _, fn := range dgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) - for _, f := range dgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(dgb.fields...)...) -} - // DiskSelect is the builder for selecting fields of Disk entities. type DiskSelect struct { *DiskQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ds *DiskSelect) Aggregate(fns ...AggregateFunc) *DiskSelect { + ds.fns = append(ds.fns, fns...) + return ds } // Scan applies the selector query and scans the result into the given value. -func (ds *DiskSelect) Scan(ctx context.Context, v interface{}) error { +func (ds *DiskSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ds.ctx, "Select") if err := ds.prepareQuery(ctx); err != nil { return err } - ds.sql = ds.DiskQuery.sqlQuery(ctx) - return ds.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ds *DiskSelect) ScanX(ctx context.Context, v interface{}) { - if err := ds.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) Strings(ctx context.Context) ([]string, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DiskSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ds *DiskSelect) StringsX(ctx context.Context) []string { - v, err := ds.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*DiskQuery, *DiskSelect](ctx, ds.DiskQuery, ds, ds.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ds.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ds *DiskSelect) StringX(ctx context.Context) string { - v, err := ds.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) Ints(ctx context.Context) ([]int, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DiskSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ds *DiskSelect) IntsX(ctx context.Context) []int { - v, err := ds.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. 
-func (ds *DiskSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ds.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ds *DiskSelect) IntX(ctx context.Context) int { - v, err := ds.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DiskSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ds *DiskSelect) Float64sX(ctx context.Context) []float64 { - v, err := ds.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ds.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ds *DiskSelect) Float64X(ctx context.Context) float64 { - v, err := ds.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DiskSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ds *DiskSelect) BoolsX(ctx context.Context) []bool { - v, err := ds.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ds *DiskSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ds.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{disk.Label} - default: - err = fmt.Errorf("ent: DiskSelect.Bools returned %d results when one was expected", len(v)) +func (ds *DiskSelect) sqlScan(ctx context.Context, root *DiskQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ds.fns)) + for _, fn := range ds.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ds *DiskSelect) BoolX(ctx context.Context) bool { - v, err := ds.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
} - return v -} - -func (ds *DiskSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ds.sql.Query() + query, args := selector.Query() if err := ds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/disk_update.go b/ent/disk_update.go index fcd398e6..b731b647 100755 --- a/ent/disk_update.go +++ b/ent/disk_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -36,6 +36,14 @@ func (du *DiskUpdate) SetSize(i int) *DiskUpdate { return du } +// SetNillableSize sets the "size" field if the given value is not nil. +func (du *DiskUpdate) SetNillableSize(i *int) *DiskUpdate { + if i != nil { + du.SetSize(*i) + } + return du +} + // AddSize adds i to the "size" field. func (du *DiskUpdate) AddSize(i int) *DiskUpdate { du.mutation.AddSize(i) @@ -74,40 +82,7 @@ func (du *DiskUpdate) ClearDiskToHost() *DiskUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (du *DiskUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(du.hooks) == 0 { - if err = du.check(); err != nil { - return 0, err - } - affected, err = du.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DiskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = du.check(); err != nil { - return 0, err - } - du.mutation = mutation - affected, err = du.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(du.hooks) - 1; i >= 0; i-- { - if du.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = du.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, du.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -143,16 +118,10 @@ func (du *DiskUpdate) check() error { } func (du *DiskUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: disk.Table, - Columns: disk.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, - }, + if err := du.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(disk.Table, disk.Columns, sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID)) if ps := du.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -161,18 +130,10 @@ func (du *DiskUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := du.mutation.Size(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: disk.FieldSize, - }) + _spec.SetField(disk.FieldSize, field.TypeInt, value) } if value, ok := du.mutation.AddedSize(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: disk.FieldSize, - }) + _spec.AddField(disk.FieldSize, field.TypeInt, value) } if du.mutation.DiskToHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -182,10 +143,7 @@ func (du *DiskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{disk.DiskToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -198,10 +156,7 @@ func (du *DiskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{disk.DiskToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -213,10 +168,11 @@ func (du *DiskUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{disk.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + du.mutation.done = true return n, nil } @@ -235,6 +191,14 @@ func (duo *DiskUpdateOne) SetSize(i int) *DiskUpdateOne { return duo } +// SetNillableSize sets the "size" field if the given value is not nil. +func (duo *DiskUpdateOne) SetNillableSize(i *int) *DiskUpdateOne { + if i != nil { + duo.SetSize(*i) + } + return duo +} + // AddSize adds i to the "size" field. func (duo *DiskUpdateOne) AddSize(i int) *DiskUpdateOne { duo.mutation.AddSize(i) @@ -271,6 +235,12 @@ func (duo *DiskUpdateOne) ClearDiskToHost() *DiskUpdateOne { return duo } +// Where appends a list predicates to the DiskUpdate builder. +func (duo *DiskUpdateOne) Where(ps ...predicate.Disk) *DiskUpdateOne { + duo.mutation.Where(ps...) + return duo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (duo *DiskUpdateOne) Select(field string, fields ...string) *DiskUpdateOne { @@ -280,40 +250,7 @@ func (duo *DiskUpdateOne) Select(field string, fields ...string) *DiskUpdateOne // Save executes the query and returns the updated Disk entity. 
func (duo *DiskUpdateOne) Save(ctx context.Context) (*Disk, error) { - var ( - err error - node *Disk - ) - if len(duo.hooks) == 0 { - if err = duo.check(); err != nil { - return nil, err - } - node, err = duo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DiskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = duo.check(); err != nil { - return nil, err - } - duo.mutation = mutation - node, err = duo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(duo.hooks) - 1; i >= 0; i-- { - if duo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = duo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, duo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -349,16 +286,10 @@ func (duo *DiskUpdateOne) check() error { } func (duo *DiskUpdateOne) sqlSave(ctx context.Context) (_node *Disk, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: disk.Table, - Columns: disk.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, - }, + if err := duo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(disk.Table, disk.Columns, sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID)) id, ok := duo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Disk.id" for update`)} @@ -384,18 +315,10 @@ func (duo *DiskUpdateOne) sqlSave(ctx context.Context) (_node *Disk, err error) } } if value, ok := duo.mutation.Size(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: disk.FieldSize, - }) + _spec.SetField(disk.FieldSize, field.TypeInt, value) } if value, ok := duo.mutation.AddedSize(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: disk.FieldSize, - }) + _spec.AddField(disk.FieldSize, field.TypeInt, value) } if duo.mutation.DiskToHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -405,10 +328,7 @@ func (duo *DiskUpdateOne) sqlSave(ctx context.Context) (_node *Disk, err error) Columns: []string{disk.DiskToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -421,10 +341,7 @@ func (duo *DiskUpdateOne) sqlSave(ctx context.Context) (_node *Disk, err error) Columns: []string{disk.DiskToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -439,9 +356,10 @@ func (duo *DiskUpdateOne) sqlSave(ctx context.Context) (_node *Disk, err error) if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{disk.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + duo.mutation.done = true return _node, nil } diff --git a/ent/dns.go b/ent/dns.go index 501a1d00..e465002c 100755 --- a/ent/dns.go +++ b/ent/dns.go @@ -1,4 
+1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/dns" "github.com/google/uuid" @@ -17,8 +18,8 @@ type DNS struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Type holds the value of the "type" field. Type string `json:"type,omitempty" hcl:"type,attr"` // RootDomain holds the value of the "root_domain" field. @@ -33,13 +34,14 @@ type DNS struct { // The values are being populated by the DNSQuery when eager-loading is set. Edges DNSEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // DNSToEnvironment holds the value of the DNSToEnvironment edge. HCLDNSToEnvironment []*Environment `json:"DNSToEnvironment,omitempty"` // DNSToCompetition holds the value of the DNSToCompetition edge. HCLDNSToCompetition []*Competition `json:"DNSToCompetition,omitempty"` - // - + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ + selectValues sql.SelectValues } // DNSEdges holds the relations/edges for other nodes in the graph. @@ -51,6 +53,11 @@ type DNSEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int + + namedDNSToEnvironment map[string][]*Environment + namedDNSToCompetition map[string][]*Competition } // DNSToEnvironmentOrErr returns the DNSToEnvironment value or an error if the edge @@ -72,18 +79,18 @@ func (e DNSEdges) DNSToCompetitionOrErr() ([]*Competition, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*DNS) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*DNS) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case dns.FieldDNSServers, dns.FieldNtpServers, dns.FieldConfig: values[i] = new([]byte) - case dns.FieldHclID, dns.FieldType, dns.FieldRootDomain: + case dns.FieldHCLID, dns.FieldType, dns.FieldRootDomain: values[i] = new(sql.NullString) case dns.FieldID: values[i] = new(uuid.UUID) default: - return nil, fmt.Errorf("unexpected column %q for type DNS", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -91,7 +98,7 @@ func (*DNS) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the DNS fields. 
-func (d *DNS) assignValues(columns []string, values []interface{}) error { +func (d *DNS) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -103,11 +110,11 @@ func (d *DNS) assignValues(columns []string, values []interface{}) error { } else if value != nil { d.ID = *value } - case dns.FieldHclID: + case dns.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - d.HclID = value.String + d.HCLID = value.String } case dns.FieldType: if value, ok := values[i].(*sql.NullString); !ok { @@ -145,36 +152,44 @@ func (d *DNS) assignValues(columns []string, values []interface{}) error { return fmt.Errorf("unmarshal field config: %w", err) } } + default: + d.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the DNS. +// This includes values selected through modifiers, order, etc. +func (d *DNS) Value(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + // QueryDNSToEnvironment queries the "DNSToEnvironment" edge of the DNS entity. func (d *DNS) QueryDNSToEnvironment() *EnvironmentQuery { - return (&DNSClient{config: d.config}).QueryDNSToEnvironment(d) + return NewDNSClient(d.config).QueryDNSToEnvironment(d) } // QueryDNSToCompetition queries the "DNSToCompetition" edge of the DNS entity. func (d *DNS) QueryDNSToCompetition() *CompetitionQuery { - return (&DNSClient{config: d.config}).QueryDNSToCompetition(d) + return NewDNSClient(d.config).QueryDNSToCompetition(d) } // Update returns a builder for updating this DNS. // Note that you need to call DNS.Unwrap() before calling this method if this DNS // was returned from a transaction, and the transaction was committed or rolled back. func (d *DNS) Update() *DNSUpdateOne { - return (&DNSClient{config: d.config}).UpdateOne(d) + return NewDNSClient(d.config).UpdateOne(d) } // Unwrap unwraps the DNS entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (d *DNS) Unwrap() *DNS { - tx, ok := d.config.driver.(*txDriver) + _tx, ok := d.config.driver.(*txDriver) if !ok { panic("ent: DNS is not a transactional entity") } - d.config.driver = tx.drv + d.config.driver = _tx.drv return d } @@ -182,28 +197,75 @@ func (d *DNS) Unwrap() *DNS { func (d *DNS) String() string { var builder strings.Builder builder.WriteString("DNS(") - builder.WriteString(fmt.Sprintf("id=%v", d.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(d.HclID) - builder.WriteString(", type=") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(d.HCLID) + builder.WriteString(", ") + builder.WriteString("type=") builder.WriteString(d.Type) - builder.WriteString(", root_domain=") + builder.WriteString(", ") + builder.WriteString("root_domain=") builder.WriteString(d.RootDomain) - builder.WriteString(", dns_servers=") + builder.WriteString(", ") + builder.WriteString("dns_servers=") builder.WriteString(fmt.Sprintf("%v", d.DNSServers)) - builder.WriteString(", ntp_servers=") + builder.WriteString(", ") + builder.WriteString("ntp_servers=") builder.WriteString(fmt.Sprintf("%v", d.NtpServers)) - builder.WriteString(", config=") + builder.WriteString(", ") + builder.WriteString("config=") builder.WriteString(fmt.Sprintf("%v", d.Config)) builder.WriteByte(')') return builder.String() } -// DNSs is a parsable slice of DNS. -type DNSs []*DNS +// NamedDNSToEnvironment returns the DNSToEnvironment named value or an error if the edge was not +// loaded in eager-loading with this name. +func (d *DNS) NamedDNSToEnvironment(name string) ([]*Environment, error) { + if d.Edges.namedDNSToEnvironment == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := d.Edges.namedDNSToEnvironment[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (d *DNS) appendNamedDNSToEnvironment(name string, edges ...*Environment) { + if d.Edges.namedDNSToEnvironment == nil { + d.Edges.namedDNSToEnvironment = make(map[string][]*Environment) + } + if len(edges) == 0 { + d.Edges.namedDNSToEnvironment[name] = []*Environment{} + } else { + d.Edges.namedDNSToEnvironment[name] = append(d.Edges.namedDNSToEnvironment[name], edges...) + } +} + +// NamedDNSToCompetition returns the DNSToCompetition named value or an error if the edge was not +// loaded in eager-loading with this name. +func (d *DNS) NamedDNSToCompetition(name string) ([]*Competition, error) { + if d.Edges.namedDNSToCompetition == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := d.Edges.namedDNSToCompetition[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (d DNSs) config(cfg config) { - for _i := range d { - d[_i].config = cfg +func (d *DNS) appendNamedDNSToCompetition(name string, edges ...*Competition) { + if d.Edges.namedDNSToCompetition == nil { + d.Edges.namedDNSToCompetition = make(map[string][]*Competition) + } + if len(edges) == 0 { + d.Edges.namedDNSToCompetition[name] = []*Competition{} + } else { + d.Edges.namedDNSToCompetition[name] = append(d.Edges.namedDNSToCompetition[name], edges...) } } + +// DNSs is a parsable slice of DNS. +type DNSs []*DNS diff --git a/ent/dns/dns.go b/ent/dns/dns.go index 18570c1c..9cd05c6e 100755 --- a/ent/dns/dns.go +++ b/ent/dns/dns.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package dns import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "dns" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldType holds the string denoting the type field in the database. FieldType = "type" // FieldRootDomain holds the string denoting the root_domain field in the database. @@ -44,7 +46,7 @@ const ( // Columns holds all SQL columns for dns fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldType, FieldRootDomain, FieldDNSServers, @@ -75,3 +77,68 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the DNS queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByRootDomain orders the results by the root_domain field. +func ByRootDomain(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRootDomain, opts...).ToFunc() +} + +// ByDNSToEnvironmentCount orders the results by DNSToEnvironment count. +func ByDNSToEnvironmentCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDNSToEnvironmentStep(), opts...) + } +} + +// ByDNSToEnvironment orders the results by DNSToEnvironment terms. +func ByDNSToEnvironment(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDNSToEnvironmentStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByDNSToCompetitionCount orders the results by DNSToCompetition count. +func ByDNSToCompetitionCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDNSToCompetitionStep(), opts...) + } +} + +// ByDNSToCompetition orders the results by DNSToCompetition terms. +func ByDNSToCompetition(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDNSToCompetitionStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newDNSToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DNSToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, DNSToEnvironmentTable, DNSToEnvironmentPrimaryKey...), + ) +} +func newDNSToCompetitionStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DNSToCompetitionInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, DNSToCompetitionTable, DNSToCompetitionPrimaryKey...), + ) +} diff --git a/ent/dns/where.go b/ent/dns/where.go index 42f4b26b..e76ab054 100755 --- a/ent/dns/where.go +++ b/ent/dns/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. 
+// Code generated by ent, DO NOT EDIT. package dns @@ -11,439 +11,257 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.DNS(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.DNS(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.DNS(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.DNS { + return predicate.DNS(sql.FieldEQ(FieldHCLID, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldEQ(FieldType, v)) } // RootDomain applies equality check predicate on the "root_domain" field. It's identical to RootDomainEQ. 
func RootDomain(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldEQ(FieldRootDomain, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.DNS { + return predicate.DNS(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.DNS { + return predicate.DNS(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.DNS { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.DNS { + return predicate.DNS(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.DNS { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.DNS { + return predicate.DNS(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.DNS { + return predicate.DNS(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.DNS { + return predicate.DNS(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.DNS { + return predicate.DNS(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. 
+func HCLIDLTE(v string) predicate.DNS { + return predicate.DNS(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.DNS { + return predicate.DNS(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.DNS { + return predicate.DNS(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.DNS { + return predicate.DNS(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.DNS { + return predicate.DNS(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.DNS { + return predicate.DNS(sql.FieldContainsFold(FieldHCLID, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...string) predicate.DNS { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.DNS(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...string) predicate.DNS { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.DNS(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. func TypeLT(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. func TypeHasSuffix(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldHasSuffix(FieldType, v)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. func TypeEqualFold(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.DNS(sql.FieldContainsFold(FieldType, v)) } // RootDomainEQ applies the EQ predicate on the "root_domain" field. func RootDomainEQ(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldEQ(FieldRootDomain, v)) } // RootDomainNEQ applies the NEQ predicate on the "root_domain" field. func RootDomainNEQ(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldNEQ(FieldRootDomain, v)) } // RootDomainIn applies the In predicate on the "root_domain" field. func RootDomainIn(vs ...string) predicate.DNS { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRootDomain), v...)) - }) + return predicate.DNS(sql.FieldIn(FieldRootDomain, vs...)) } // RootDomainNotIn applies the NotIn predicate on the "root_domain" field. func RootDomainNotIn(vs ...string) predicate.DNS { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNS(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRootDomain), v...)) - }) + return predicate.DNS(sql.FieldNotIn(FieldRootDomain, vs...)) } // RootDomainGT applies the GT predicate on the "root_domain" field. func RootDomainGT(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldGT(FieldRootDomain, v)) } // RootDomainGTE applies the GTE predicate on the "root_domain" field. func RootDomainGTE(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldGTE(FieldRootDomain, v)) } // RootDomainLT applies the LT predicate on the "root_domain" field. func RootDomainLT(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldLT(FieldRootDomain, v)) } // RootDomainLTE applies the LTE predicate on the "root_domain" field. func RootDomainLTE(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldLTE(FieldRootDomain, v)) } // RootDomainContains applies the Contains predicate on the "root_domain" field. func RootDomainContains(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldContains(FieldRootDomain, v)) } // RootDomainHasPrefix applies the HasPrefix predicate on the "root_domain" field. func RootDomainHasPrefix(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldHasPrefix(FieldRootDomain, v)) } // RootDomainHasSuffix applies the HasSuffix predicate on the "root_domain" field. func RootDomainHasSuffix(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldHasSuffix(FieldRootDomain, v)) } // RootDomainEqualFold applies the EqualFold predicate on the "root_domain" field. func RootDomainEqualFold(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldEqualFold(FieldRootDomain, v)) } // RootDomainContainsFold applies the ContainsFold predicate on the "root_domain" field. func RootDomainContainsFold(v string) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldRootDomain), v)) - }) + return predicate.DNS(sql.FieldContainsFold(FieldRootDomain, v)) } // HasDNSToEnvironment applies the HasEdge predicate on the "DNSToEnvironment" edge. 
@@ -451,7 +269,6 @@ func HasDNSToEnvironment() predicate.DNS { return predicate.DNS(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DNSToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, DNSToEnvironmentTable, DNSToEnvironmentPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -461,11 +278,7 @@ func HasDNSToEnvironment() predicate.DNS { // HasDNSToEnvironmentWith applies the HasEdge predicate on the "DNSToEnvironment" edge with a given conditions (other predicates). func HasDNSToEnvironmentWith(preds ...predicate.Environment) predicate.DNS { return predicate.DNS(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DNSToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, DNSToEnvironmentTable, DNSToEnvironmentPrimaryKey...), - ) + step := newDNSToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -479,7 +292,6 @@ func HasDNSToCompetition() predicate.DNS { return predicate.DNS(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DNSToCompetitionTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, DNSToCompetitionTable, DNSToCompetitionPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -489,11 +301,7 @@ func HasDNSToCompetition() predicate.DNS { // HasDNSToCompetitionWith applies the HasEdge predicate on the "DNSToCompetition" edge with a given conditions (other predicates). func HasDNSToCompetitionWith(preds ...predicate.Competition) predicate.DNS { return predicate.DNS(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DNSToCompetitionInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, DNSToCompetitionTable, DNSToCompetitionPrimaryKey...), - ) + step := newDNSToCompetitionStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -504,32 +312,15 @@ func HasDNSToCompetitionWith(preds ...predicate.Competition) predicate.DNS { // And groups predicates with the AND operator between them. func And(predicates ...predicate.DNS) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.DNS(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.DNS) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.DNS(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.DNS) predicate.DNS { - return predicate.DNS(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.DNS(sql.NotPredicates(p)) } diff --git a/ent/dns_create.go b/ent/dns_create.go index 57c6112f..28d8cdba 100755 --- a/ent/dns_create.go +++ b/ent/dns_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -22,9 +22,9 @@ type DNSCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (dc *DNSCreate) SetHclID(s string) *DNSCreate { - dc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. 
+func (dc *DNSCreate) SetHCLID(s string) *DNSCreate { + dc.mutation.SetHCLID(s) return dc } @@ -109,44 +109,8 @@ func (dc *DNSCreate) Mutation() *DNSMutation { // Save creates the DNS in the database. func (dc *DNSCreate) Save(ctx context.Context) (*DNS, error) { - var ( - err error - node *DNS - ) dc.defaults() - if len(dc.hooks) == 0 { - if err = dc.check(); err != nil { - return nil, err - } - node, err = dc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = dc.check(); err != nil { - return nil, err - } - dc.mutation = mutation - if node, err = dc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(dc.hooks) - 1; i >= 0; i-- { - if dc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -181,7 +145,7 @@ func (dc *DNSCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (dc *DNSCreate) check() error { - if _, ok := dc.mutation.HclID(); !ok { + if _, ok := dc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "DNS.hcl_id"`)} } if _, ok := dc.mutation.GetType(); !ok { @@ -203,10 +167,13 @@ func (dc *DNSCreate) check() error { } func (dc *DNSCreate) sqlSave(ctx context.Context) (*DNS, error) { + if err := dc.check(); err != nil { + return nil, err + } _node, _spec := dc.createSpec() if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -217,70 +184,42 @@ func (dc *DNSCreate) sqlSave(ctx context.Context) (*DNS, error) { return nil, err } } + dc.mutation.id = &_node.ID + dc.mutation.done = true return _node, nil } func (dc *DNSCreate) createSpec() (*DNS, *sqlgraph.CreateSpec) { var ( _node = &DNS{config: dc.config} - _spec = &sqlgraph.CreateSpec{ - Table: dns.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(dns.Table, sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID)) ) if id, ok := dc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := dc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldHclID, - }) - _node.HclID = value + if value, ok := dc.mutation.HCLID(); ok { + _spec.SetField(dns.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := dc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldType, - }) + _spec.SetField(dns.FieldType, field.TypeString, value) _node.Type = value } if value, ok := dc.mutation.RootDomain(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldRootDomain, - }) + _spec.SetField(dns.FieldRootDomain, field.TypeString, value) _node.RootDomain = value } if value, ok := 
dc.mutation.DNSServers(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldDNSServers, - }) + _spec.SetField(dns.FieldDNSServers, field.TypeJSON, value) _node.DNSServers = value } if value, ok := dc.mutation.NtpServers(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldNtpServers, - }) + _spec.SetField(dns.FieldNtpServers, field.TypeJSON, value) _node.NtpServers = value } if value, ok := dc.mutation.Config(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldConfig, - }) + _spec.SetField(dns.FieldConfig, field.TypeJSON, value) _node.Config = value } if nodes := dc.mutation.DNSToEnvironmentIDs(); len(nodes) > 0 { @@ -291,10 +230,7 @@ func (dc *DNSCreate) createSpec() (*DNS, *sqlgraph.CreateSpec) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -310,10 +246,7 @@ func (dc *DNSCreate) createSpec() (*DNS, *sqlgraph.CreateSpec) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -327,11 +260,15 @@ func (dc *DNSCreate) createSpec() (*DNS, *sqlgraph.CreateSpec) { // DNSCreateBulk is the builder for creating many DNS entities in bulk. type DNSCreateBulk struct { config + err error builders []*DNSCreate } // Save creates the DNS entities in the database. func (dcb *DNSCreateBulk) Save(ctx context.Context) ([]*DNS, error) { + if dcb.err != nil { + return nil, dcb.err + } specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) nodes := make([]*DNS, len(dcb.builders)) mutators := make([]Mutator, len(dcb.builders)) @@ -348,8 +285,8 @@ func (dcb *DNSCreateBulk) Save(ctx context.Context) ([]*DNS, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) } else { @@ -357,7 +294,7 @@ func (dcb *DNSCreateBulk) Save(ctx context.Context) ([]*DNS, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/dns_delete.go b/ent/dns_delete.go index 33d7df9c..80e61db0 100755 --- a/ent/dns_delete.go +++ b/ent/dns_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (dd *DNSDelete) Where(ps ...predicate.DNS) *DNSDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (dd *DNSDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(dd.hooks) == 0 { - affected, err = dd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - dd.mutation = mutation - affected, err = dd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(dd.hooks) - 1; i >= 0; i-- { - if dd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (dd *DNSDelete) ExecX(ctx context.Context) int { } func (dd *DNSDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dns.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(dns.Table, sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID)) if ps := dd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (dd *DNSDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + dd.mutation.done = true + return affected, err } // DNSDeleteOne is the builder for deleting a single DNS entity. @@ -92,6 +61,12 @@ type DNSDeleteOne struct { dd *DNSDelete } +// Where appends a list predicates to the DNSDelete builder. +func (ddo *DNSDeleteOne) Where(ps ...predicate.DNS) *DNSDeleteOne { + ddo.dd.mutation.Where(ps...) + return ddo +} + // Exec executes the deletion query. func (ddo *DNSDeleteOne) Exec(ctx context.Context) error { n, err := ddo.dd.Exec(ctx) @@ -107,5 +82,7 @@ func (ddo *DNSDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ddo *DNSDeleteOne) ExecX(ctx context.Context) { - ddo.dd.ExecX(ctx) + if err := ddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/dns_query.go b/ent/dns_query.go index af776d01..4be4372f 100755 --- a/ent/dns_query.go +++ b/ent/dns_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,15 +21,16 @@ import ( // DNSQuery is the builder for querying DNS entities. type DNSQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.DNS - // eager-loading edges. - withDNSToEnvironment *EnvironmentQuery - withDNSToCompetition *CompetitionQuery + ctx *QueryContext + order []dns.OrderOption + inters []Interceptor + predicates []predicate.DNS + withDNSToEnvironment *EnvironmentQuery + withDNSToCompetition *CompetitionQuery + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*DNS) error + withNamedDNSToEnvironment map[string]*EnvironmentQuery + withNamedDNSToCompetition map[string]*CompetitionQuery // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -42,34 +42,34 @@ func (dq *DNSQuery) Where(ps ...predicate.DNS) *DNSQuery { return dq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (dq *DNSQuery) Limit(limit int) *DNSQuery { - dq.limit = &limit + dq.ctx.Limit = &limit return dq } -// Offset adds an offset step to the query. +// Offset to start from. func (dq *DNSQuery) Offset(offset int) *DNSQuery { - dq.offset = &offset + dq.ctx.Offset = &offset return dq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (dq *DNSQuery) Unique(unique bool) *DNSQuery { - dq.unique = &unique + dq.ctx.Unique = &unique return dq } -// Order adds an order step to the query. -func (dq *DNSQuery) Order(o ...OrderFunc) *DNSQuery { +// Order specifies how the records should be ordered. +func (dq *DNSQuery) Order(o ...dns.OrderOption) *DNSQuery { dq.order = append(dq.order, o...) return dq } // QueryDNSToEnvironment chains the current query on the "DNSToEnvironment" edge. func (dq *DNSQuery) QueryDNSToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: dq.config} + query := (&EnvironmentClient{config: dq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := dq.prepareQuery(ctx); err != nil { return nil, err @@ -91,7 +91,7 @@ func (dq *DNSQuery) QueryDNSToEnvironment() *EnvironmentQuery { // QueryDNSToCompetition chains the current query on the "DNSToCompetition" edge. func (dq *DNSQuery) QueryDNSToCompetition() *CompetitionQuery { - query := &CompetitionQuery{config: dq.config} + query := (&CompetitionClient{config: dq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := dq.prepareQuery(ctx); err != nil { return nil, err @@ -114,7 +114,7 @@ func (dq *DNSQuery) QueryDNSToCompetition() *CompetitionQuery { // First returns the first DNS entity from the query. // Returns a *NotFoundError when no DNS was found. func (dq *DNSQuery) First(ctx context.Context) (*DNS, error) { - nodes, err := dq.Limit(1).All(ctx) + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First")) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func (dq *DNSQuery) FirstX(ctx context.Context) *DNS { // Returns a *NotFoundError when no DNS ID was found. func (dq *DNSQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = dq.Limit(1).IDs(ctx); err != nil { + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -160,7 +160,7 @@ func (dq *DNSQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one DNS entity is found. // Returns a *NotFoundError when no DNS entities are found. func (dq *DNSQuery) Only(ctx context.Context) (*DNS, error) { - nodes, err := dq.Limit(2).All(ctx) + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only")) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func (dq *DNSQuery) OnlyX(ctx context.Context) *DNS { // Returns a *NotFoundError when no entities are found. 
func (dq *DNSQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = dq.Limit(2).IDs(ctx); err != nil { + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -213,10 +213,12 @@ func (dq *DNSQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of DNSs. func (dq *DNSQuery) All(ctx context.Context) ([]*DNS, error) { + ctx = setContextOp(ctx, dq.ctx, "All") if err := dq.prepareQuery(ctx); err != nil { return nil, err } - return dq.sqlAll(ctx) + qr := querierAll[[]*DNS, *DNSQuery]() + return withInterceptors[[]*DNS](ctx, dq, qr, dq.inters) } // AllX is like All, but panics if an error occurs. @@ -229,9 +231,12 @@ func (dq *DNSQuery) AllX(ctx context.Context) []*DNS { } // IDs executes the query and returns a list of DNS IDs. -func (dq *DNSQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := dq.Select(dns.FieldID).Scan(ctx, &ids); err != nil { +func (dq *DNSQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if dq.ctx.Unique == nil && dq.path != nil { + dq.Unique(true) + } + ctx = setContextOp(ctx, dq.ctx, "IDs") + if err = dq.Select(dns.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -248,10 +253,11 @@ func (dq *DNSQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (dq *DNSQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dq.ctx, "Count") if err := dq.prepareQuery(ctx); err != nil { return 0, err } - return dq.sqlCount(ctx) + return withInterceptors[int](ctx, dq, querierCount[*DNSQuery](), dq.inters) } // CountX is like Count, but panics if an error occurs. @@ -265,10 +271,15 @@ func (dq *DNSQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (dq *DNSQuery) Exist(ctx context.Context) (bool, error) { - if err := dq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, dq.ctx, "Exist") + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return dq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -288,23 +299,22 @@ func (dq *DNSQuery) Clone() *DNSQuery { } return &DNSQuery{ config: dq.config, - limit: dq.limit, - offset: dq.offset, - order: append([]OrderFunc{}, dq.order...), + ctx: dq.ctx.Clone(), + order: append([]dns.OrderOption{}, dq.order...), + inters: append([]Interceptor{}, dq.inters...), predicates: append([]predicate.DNS{}, dq.predicates...), withDNSToEnvironment: dq.withDNSToEnvironment.Clone(), withDNSToCompetition: dq.withDNSToCompetition.Clone(), // clone intermediate query. - sql: dq.sql.Clone(), - path: dq.path, - unique: dq.unique, + sql: dq.sql.Clone(), + path: dq.path, } } // WithDNSToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "DNSToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. 
func (dq *DNSQuery) WithDNSToEnvironment(opts ...func(*EnvironmentQuery)) *DNSQuery { - query := &EnvironmentQuery{config: dq.config} + query := (&EnvironmentClient{config: dq.config}).Query() for _, opt := range opts { opt(query) } @@ -315,7 +325,7 @@ func (dq *DNSQuery) WithDNSToEnvironment(opts ...func(*EnvironmentQuery)) *DNSQu // WithDNSToCompetition tells the query-builder to eager-load the nodes that are connected to // the "DNSToCompetition" edge. The optional arguments are used to configure the query builder of the edge. func (dq *DNSQuery) WithDNSToCompetition(opts ...func(*CompetitionQuery)) *DNSQuery { - query := &CompetitionQuery{config: dq.config} + query := (&CompetitionClient{config: dq.config}).Query() for _, opt := range opts { opt(query) } @@ -329,25 +339,21 @@ func (dq *DNSQuery) WithDNSToCompetition(opts ...func(*CompetitionQuery)) *DNSQu // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.DNS.Query(). -// GroupBy(dns.FieldHclID). +// GroupBy(dns.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (dq *DNSQuery) GroupBy(field string, fields ...string) *DNSGroupBy { - group := &DNSGroupBy{config: dq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := dq.prepareQuery(ctx); err != nil { - return nil, err - } - return dq.sqlQuery(ctx), nil - } - return group + dq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DNSGroupBy{build: dq} + grbuild.flds = &dq.ctx.Fields + grbuild.label = dns.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -356,20 +362,37 @@ func (dq *DNSQuery) GroupBy(field string, fields ...string) *DNSGroupBy { // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.DNS.Query(). -// Select(dns.FieldHclID). +// Select(dns.FieldHCLID). // Scan(ctx, &v) -// func (dq *DNSQuery) Select(fields ...string) *DNSSelect { - dq.fields = append(dq.fields, fields...) - return &DNSSelect{DNSQuery: dq} + dq.ctx.Fields = append(dq.ctx.Fields, fields...) + sbuild := &DNSSelect{DNSQuery: dq} + sbuild.label = dns.Label + sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DNSSelect configured with the given aggregations. +func (dq *DNSQuery) Aggregate(fns ...AggregateFunc) *DNSSelect { + return dq.Select().Aggregate(fns...) 
} func (dq *DNSQuery) prepareQuery(ctx context.Context) error { - for _, f := range dq.fields { + for _, inter := range dq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dq); err != nil { + return err + } + } + } + for _, f := range dq.ctx.Fields { if !dns.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -384,7 +407,7 @@ func (dq *DNSQuery) prepareQuery(ctx context.Context) error { return nil } -func (dq *DNSQuery) sqlAll(ctx context.Context) ([]*DNS, error) { +func (dq *DNSQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DNS, error) { var ( nodes = []*DNS{} _spec = dq.querySpec() @@ -393,193 +416,207 @@ func (dq *DNSQuery) sqlAll(ctx context.Context) ([]*DNS, error) { dq.withDNSToCompetition != nil, } ) - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*DNS).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &DNS{config: dq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(dq.modifiers) > 0 { + _spec.Modifiers = dq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := dq.withDNSToEnvironment; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*DNS, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.DNSToEnvironment = []*Environment{} + if err := dq.loadDNSToEnvironment(ctx, query, nodes, + func(n *DNS) { n.Edges.DNSToEnvironment = []*Environment{} }, + func(n *DNS, e *Environment) { n.Edges.DNSToEnvironment = append(n.Edges.DNSToEnvironment, e) }); err != nil { + return nil, err } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*DNS) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: dns.DNSToEnvironmentTable, - Columns: dns.DNSToEnvironmentPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(dns.DNSToEnvironmentPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + if query := dq.withDNSToCompetition; query != nil { + if err := dq.loadDNSToCompetition(ctx, query, nodes, + func(n *DNS) { n.Edges.DNSToCompetition = []*Competition{} }, + func(n *DNS, e *Competition) { n.Edges.DNSToCompetition = 
append(n.Edges.DNSToCompetition, e) }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, dq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "DNSToEnvironment": %w`, err) + } + for name, query := range dq.withNamedDNSToEnvironment { + if err := dq.loadDNSToEnvironment(ctx, query, nodes, + func(n *DNS) { n.appendNamedDNSToEnvironment(name) }, + func(n *DNS, e *Environment) { n.appendNamedDNSToEnvironment(name, e) }); err != nil { + return nil, err } - query.Where(environment.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range dq.withNamedDNSToCompetition { + if err := dq.loadDNSToCompetition(ctx, query, nodes, + func(n *DNS) { n.appendNamedDNSToCompetition(name) }, + func(n *DNS, e *Competition) { n.appendNamedDNSToCompetition(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "DNSToEnvironment" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.DNSToEnvironment = append(nodes[i].Edges.DNSToEnvironment, n) - } + } + for i := range dq.loadTotal { + if err := dq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := dq.withDNSToCompetition; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*DNS, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.DNSToCompetition = []*Competition{} +func (dq *DNSQuery) loadDNSToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*DNS, init func(*DNS), assign func(*DNS, *Environment)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*DNS) + nids := make(map[uuid.UUID]map[*DNS]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*DNS) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: dns.DNSToCompetitionTable, - Columns: dns.DNSToCompetitionPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(dns.DNSToCompetitionPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(dns.DNSToEnvironmentTable) + s.Join(joinT).On(s.C(environment.FieldID), joinT.C(dns.DNSToEnvironmentPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(dns.DNSToEnvironmentPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(dns.DNSToEnvironmentPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*DNS]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) } - edges[inValue] = append(edges[inValue], node) + nids[inValue][byID[outValue]] = struct{}{} return nil - }, + } + }) + }) + neighbors, err := withInterceptors[[]*Environment](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "DNSToEnvironment" node returned %v`, n.ID) } - if err := sqlgraph.QueryEdges(ctx, dq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "DNSToCompetition": %w`, err) + for kn := range nodes { + assign(kn, n) } - query.Where(competition.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (dq *DNSQuery) loadDNSToCompetition(ctx context.Context, query *CompetitionQuery, nodes []*DNS, init func(*DNS), assign func(*DNS, *Competition)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*DNS) + nids := make(map[uuid.UUID]map[*DNS]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "DNSToCompetition" node returned %v`, n.ID) + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(dns.DNSToCompetitionTable) + s.Join(joinT).On(s.C(competition.FieldID), joinT.C(dns.DNSToCompetitionPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(dns.DNSToCompetitionPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(dns.DNSToCompetitionPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - for i := range nodes { - nodes[i].Edges.DNSToCompetition = append(nodes[i].Edges.DNSToCompetition, n) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*DNS]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } + }) + }) + neighbors, err := withInterceptors[[]*Competition](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "DNSToCompetition" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - return nodes, nil + return nil } func (dq *DNSQuery) sqlCount(ctx context.Context) (int, error) { _spec := dq.querySpec() - _spec.Node.Columns = dq.fields - if len(dq.fields) > 0 { - _spec.Unique = dq.unique != nil && *dq.unique + if len(dq.modifiers) > 0 { + _spec.Modifiers = dq.modifiers } - return sqlgraph.CountNodes(ctx, dq.driver, _spec) -} - -func (dq *DNSQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := dq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = dq.ctx.Fields + if len(dq.ctx.Fields) > 0 { + _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, dq.driver, _spec) } func (dq *DNSQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: dns.Table, - Columns: dns.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, - }, - From: dq.sql, - Unique: true, - } - if unique := dq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(dns.Table, dns.Columns, sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID)) + _spec.From = dq.sql + if unique := dq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if dq.path != nil { + _spec.Unique = true } - if fields := dq.fields; len(fields) > 0 { + if fields := dq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, dns.FieldID) for i := range fields { @@ -595,10 +632,10 @@ func (dq *DNSQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := dq.order; len(ps) > 0 { @@ -614,7 +651,7 @@ func (dq *DNSQuery) querySpec() *sqlgraph.QuerySpec { func (dq *DNSQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(dq.driver.Dialect()) t1 := builder.Table(dns.Table) - columns := dq.fields + columns := dq.ctx.Fields if len(columns) == 0 { columns = dns.Columns } @@ -623,7 +660,7 @@ func (dq *DNSQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = dq.sql 
selector.Select(selector.Columns(columns...)...) } - if dq.unique != nil && *dq.unique { + if dq.ctx.Unique != nil && *dq.ctx.Unique { selector.Distinct() } for _, p := range dq.predicates { @@ -632,498 +669,128 @@ func (dq *DNSQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range dq.order { p(selector) } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// DNSGroupBy is the group-by builder for DNS entities. -type DNSGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (dgb *DNSGroupBy) Aggregate(fns ...AggregateFunc) *DNSGroupBy { - dgb.fns = append(dgb.fns, fns...) - return dgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (dgb *DNSGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := dgb.path(ctx) - if err != nil { - return err - } - dgb.sql = query - return dgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (dgb *DNSGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := dgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DNSGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (dgb *DNSGroupBy) StringsX(ctx context.Context) []string { - v, err := dgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = dgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (dgb *DNSGroupBy) StringX(ctx context.Context) string { - v, err := dgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DNSGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. 
-func (dgb *DNSGroupBy) IntsX(ctx context.Context) []int { - v, err := dgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = dgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (dgb *DNSGroupBy) IntX(ctx context.Context) int { - v, err := dgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DNSGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedDNSToEnvironment tells the query-builder to eager-load the nodes that are connected to the "DNSToEnvironment" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (dq *DNSQuery) WithNamedDNSToEnvironment(name string, opts ...func(*EnvironmentQuery)) *DNSQuery { + query := (&EnvironmentClient{config: dq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (dgb *DNSGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := dgb.Float64s(ctx) - if err != nil { - panic(err) + if dq.withNamedDNSToEnvironment == nil { + dq.withNamedDNSToEnvironment = make(map[string]*EnvironmentQuery) } - return v + dq.withNamedDNSToEnvironment[name] = query + return dq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = dgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedDNSToCompetition tells the query-builder to eager-load the nodes that are connected to the "DNSToCompetition" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (dq *DNSQuery) WithNamedDNSToCompetition(name string, opts ...func(*CompetitionQuery)) *DNSQuery { + query := (&CompetitionClient{config: dq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (dgb *DNSGroupBy) Float64X(ctx context.Context) float64 { - v, err := dgb.Float64(ctx) - if err != nil { - panic(err) + if dq.withNamedDNSToCompetition == nil { + dq.withNamedDNSToCompetition = make(map[string]*CompetitionQuery) } - return v + dq.withNamedDNSToCompetition[name] = query + return dq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (dgb *DNSGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(dgb.fields) > 1 { - return nil, errors.New("ent: DNSGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := dgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// DNSGroupBy is the group-by builder for DNS entities. +type DNSGroupBy struct { + selector + build *DNSQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (dgb *DNSGroupBy) BoolsX(ctx context.Context) []bool { - v, err := dgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DNSGroupBy) Aggregate(fns ...AggregateFunc) *DNSGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (dgb *DNSGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = dgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (dgb *DNSGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy") + if err := dgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*DNSQuery, *DNSGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (dgb *DNSGroupBy) BoolX(ctx context.Context) bool { - v, err := dgb.Bool(ctx) - if err != nil { - panic(err) +func (dgb *DNSGroupBy) sqlScan(ctx context.Context, root *DNSQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (dgb *DNSGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range dgb.fields { - if !dns.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns)) + for _, f := range *dgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := dgb.sqlQuery() + selector.GroupBy(selector.Columns(*dgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (dgb *DNSGroupBy) sqlQuery() *sql.Selector { - selector := dgb.sql.Select() - aggregation := make([]string, 0, len(dgb.fns)) - for _, fn := range dgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) - for _, f := range dgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) 
- selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(dgb.fields...)...) -} - // DNSSelect is the builder for selecting fields of DNS entities. type DNSSelect struct { *DNSQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ds *DNSSelect) Aggregate(fns ...AggregateFunc) *DNSSelect { + ds.fns = append(ds.fns, fns...) + return ds } // Scan applies the selector query and scans the result into the given value. -func (ds *DNSSelect) Scan(ctx context.Context, v interface{}) error { +func (ds *DNSSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ds.ctx, "Select") if err := ds.prepareQuery(ctx); err != nil { return err } - ds.sql = ds.DNSQuery.sqlQuery(ctx) - return ds.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ds *DNSSelect) ScanX(ctx context.Context, v interface{}) { - if err := ds.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Strings(ctx context.Context) ([]string, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DNSSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ds *DNSSelect) StringsX(ctx context.Context) []string { - v, err := ds.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ds.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSSelect.Strings returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*DNSQuery, *DNSSelect](ctx, ds.DNSQuery, ds, ds.inters, v) } -// StringX is like String, but panics if an error occurs. -func (ds *DNSSelect) StringX(ctx context.Context) string { - v, err := ds.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Ints(ctx context.Context) ([]int, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DNSSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ds *DNSSelect) IntsX(ctx context.Context) []int { - v, err := ds.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ds.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. 
-func (ds *DNSSelect) IntX(ctx context.Context) int { - v, err := ds.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DNSSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ds *DNSSelect) Float64sX(ctx context.Context) []float64 { - v, err := ds.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ds.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ds *DNSSelect) Float64X(ctx context.Context) float64 { - v, err := ds.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ds.fields) > 1 { - return nil, errors.New("ent: DNSSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ds *DNSSelect) BoolsX(ctx context.Context) []bool { - v, err := ds.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ds *DNSSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ds.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dns.Label} - default: - err = fmt.Errorf("ent: DNSSelect.Bools returned %d results when one was expected", len(v)) +func (ds *DNSSelect) sqlScan(ctx context.Context, root *DNSQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ds.fns)) + for _, fn := range ds.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ds *DNSSelect) BoolX(ctx context.Context) bool { - v, err := ds.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ds *DNSSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ds.sql.Query() + query, args := selector.Query() if err := ds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/dns_update.go b/ent/dns_update.go index 4929f5b0..8513368a 100755 --- a/ent/dns_update.go +++ b/ent/dns_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/competition" "github.com/gen0cide/laforge/ent/dns" @@ -30,9 +31,17 @@ func (du *DNSUpdate) Where(ps ...predicate.DNS) *DNSUpdate { return du } -// SetHclID sets the "hcl_id" field. -func (du *DNSUpdate) SetHclID(s string) *DNSUpdate { - du.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (du *DNSUpdate) SetHCLID(s string) *DNSUpdate { + du.mutation.SetHCLID(s) + return du +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (du *DNSUpdate) SetNillableHCLID(s *string) *DNSUpdate { + if s != nil { + du.SetHCLID(*s) + } return du } @@ -42,24 +51,52 @@ func (du *DNSUpdate) SetType(s string) *DNSUpdate { return du } +// SetNillableType sets the "type" field if the given value is not nil. +func (du *DNSUpdate) SetNillableType(s *string) *DNSUpdate { + if s != nil { + du.SetType(*s) + } + return du +} + // SetRootDomain sets the "root_domain" field. func (du *DNSUpdate) SetRootDomain(s string) *DNSUpdate { du.mutation.SetRootDomain(s) return du } +// SetNillableRootDomain sets the "root_domain" field if the given value is not nil. +func (du *DNSUpdate) SetNillableRootDomain(s *string) *DNSUpdate { + if s != nil { + du.SetRootDomain(*s) + } + return du +} + // SetDNSServers sets the "dns_servers" field. func (du *DNSUpdate) SetDNSServers(s []string) *DNSUpdate { du.mutation.SetDNSServers(s) return du } +// AppendDNSServers appends s to the "dns_servers" field. +func (du *DNSUpdate) AppendDNSServers(s []string) *DNSUpdate { + du.mutation.AppendDNSServers(s) + return du +} + // SetNtpServers sets the "ntp_servers" field. func (du *DNSUpdate) SetNtpServers(s []string) *DNSUpdate { du.mutation.SetNtpServers(s) return du } +// AppendNtpServers appends s to the "ntp_servers" field. +func (du *DNSUpdate) AppendNtpServers(s []string) *DNSUpdate { + du.mutation.AppendNtpServers(s) + return du +} + // SetConfig sets the "config" field. func (du *DNSUpdate) SetConfig(m map[string]string) *DNSUpdate { du.mutation.SetConfig(m) @@ -145,34 +182,7 @@ func (du *DNSUpdate) RemoveDNSToCompetition(c ...*Competition) *DNSUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (du *DNSUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(du.hooks) == 0 { - affected, err = du.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - du.mutation = mutation - affected, err = du.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(du.hooks) - 1; i >= 0; i-- { - if du.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = du.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, du.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) } // SaveX is like Save, but panics if an error occurs. 
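Reviewer note (not part of the generated code): the dns_query.go hunks earlier in this diff move limit/offset/unique onto dq.ctx, route All/Count/GroupBy/Select through interceptors, and add Aggregate plus WithNamed* eager-loading, while the hunk directly above collapses DNSUpdate.Save into withHooks and introduces Append*/SetNillable* setters. The sketch below is illustrative only: client is an assumed *ent.Client, and dns.HCLID is assumed to exist after the HclID -> HCLID predicate rename (shown for dnsrecord later in this diff); every other method appears verbatim in the hunks.

package sketch // illustrative reviewer example, not part of this change

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/dns"
)

// namedEnvironments exercises the new query surface: named eager-loading and
// the ctx-backed Limit added to DNSQuery in this diff.
func namedEnvironments(ctx context.Context, client *ent.Client) ([]*ent.DNS, error) {
	return client.DNS.Query().
		WithNamedDNSToEnvironment("all", func(q *ent.EnvironmentQuery) {
			q.Limit(10) // per-edge configuration, same shape as WithDNSToEnvironment
		}).
		Limit(50). // now stored on dq.ctx.Limit
		All(ctx)   // runs through querierAll plus any registered interceptors
}

// appendResolver exercises the regenerated DNSUpdate builder: predicates on the
// update, JSON append via AppendDNSServers, and a nillable setter.
func appendResolver(ctx context.Context, client *ent.Client, hclID, resolver string) error {
	affected, err := client.DNS.Update().
		Where(dns.HCLID(hclID)).              // assumed predicate; rename mirrors dnsrecord/where.go
		AppendDNSServers([]string{resolver}). // appends to dns_servers instead of overwriting
		SetNillableRootDomain(nil).           // nil leaves root_domain untouched
		Save(ctx)                             // hooks now run through withHooks
	if err != nil {
		return fmt.Errorf("append resolver to %q: %w", hclID, err)
	}
	fmt.Printf("updated %d DNS rows\n", affected)
	return nil
}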
@@ -198,16 +208,7 @@ func (du *DNSUpdate) ExecX(ctx context.Context) { } func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dns.Table, - Columns: dns.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(dns.Table, dns.Columns, sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID)) if ps := du.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -215,47 +216,33 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := du.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldHclID, - }) + if value, ok := du.mutation.HCLID(); ok { + _spec.SetField(dns.FieldHCLID, field.TypeString, value) } if value, ok := du.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldType, - }) + _spec.SetField(dns.FieldType, field.TypeString, value) } if value, ok := du.mutation.RootDomain(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldRootDomain, - }) + _spec.SetField(dns.FieldRootDomain, field.TypeString, value) } if value, ok := du.mutation.DNSServers(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldDNSServers, + _spec.SetField(dns.FieldDNSServers, field.TypeJSON, value) + } + if value, ok := du.mutation.AppendedDNSServers(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, dns.FieldDNSServers, value) }) } if value, ok := du.mutation.NtpServers(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldNtpServers, + _spec.SetField(dns.FieldNtpServers, field.TypeJSON, value) + } + if value, ok := du.mutation.AppendedNtpServers(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, dns.FieldNtpServers, value) }) } if value, ok := du.mutation.Config(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldConfig, - }) + _spec.SetField(dns.FieldConfig, field.TypeJSON, value) } if du.mutation.DNSToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -265,10 +252,7 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -281,10 +265,7 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -300,10 +281,7 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - 
Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -319,10 +297,7 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -335,10 +310,7 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -354,10 +326,7 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -369,10 +338,11 @@ func (du *DNSUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{dns.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + du.mutation.done = true return n, nil } @@ -384,9 +354,17 @@ type DNSUpdateOne struct { mutation *DNSMutation } -// SetHclID sets the "hcl_id" field. -func (duo *DNSUpdateOne) SetHclID(s string) *DNSUpdateOne { - duo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (duo *DNSUpdateOne) SetHCLID(s string) *DNSUpdateOne { + duo.mutation.SetHCLID(s) + return duo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (duo *DNSUpdateOne) SetNillableHCLID(s *string) *DNSUpdateOne { + if s != nil { + duo.SetHCLID(*s) + } return duo } @@ -396,24 +374,52 @@ func (duo *DNSUpdateOne) SetType(s string) *DNSUpdateOne { return duo } +// SetNillableType sets the "type" field if the given value is not nil. +func (duo *DNSUpdateOne) SetNillableType(s *string) *DNSUpdateOne { + if s != nil { + duo.SetType(*s) + } + return duo +} + // SetRootDomain sets the "root_domain" field. func (duo *DNSUpdateOne) SetRootDomain(s string) *DNSUpdateOne { duo.mutation.SetRootDomain(s) return duo } +// SetNillableRootDomain sets the "root_domain" field if the given value is not nil. +func (duo *DNSUpdateOne) SetNillableRootDomain(s *string) *DNSUpdateOne { + if s != nil { + duo.SetRootDomain(*s) + } + return duo +} + // SetDNSServers sets the "dns_servers" field. func (duo *DNSUpdateOne) SetDNSServers(s []string) *DNSUpdateOne { duo.mutation.SetDNSServers(s) return duo } +// AppendDNSServers appends s to the "dns_servers" field. +func (duo *DNSUpdateOne) AppendDNSServers(s []string) *DNSUpdateOne { + duo.mutation.AppendDNSServers(s) + return duo +} + // SetNtpServers sets the "ntp_servers" field. func (duo *DNSUpdateOne) SetNtpServers(s []string) *DNSUpdateOne { duo.mutation.SetNtpServers(s) return duo } +// AppendNtpServers appends s to the "ntp_servers" field. 
+func (duo *DNSUpdateOne) AppendNtpServers(s []string) *DNSUpdateOne { + duo.mutation.AppendNtpServers(s) + return duo +} + // SetConfig sets the "config" field. func (duo *DNSUpdateOne) SetConfig(m map[string]string) *DNSUpdateOne { duo.mutation.SetConfig(m) @@ -497,6 +503,12 @@ func (duo *DNSUpdateOne) RemoveDNSToCompetition(c ...*Competition) *DNSUpdateOne return duo.RemoveDNSToCompetitionIDs(ids...) } +// Where appends a list predicates to the DNSUpdate builder. +func (duo *DNSUpdateOne) Where(ps ...predicate.DNS) *DNSUpdateOne { + duo.mutation.Where(ps...) + return duo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (duo *DNSUpdateOne) Select(field string, fields ...string) *DNSUpdateOne { @@ -506,34 +518,7 @@ func (duo *DNSUpdateOne) Select(field string, fields ...string) *DNSUpdateOne { // Save executes the query and returns the updated DNS entity. func (duo *DNSUpdateOne) Save(ctx context.Context) (*DNS, error) { - var ( - err error - node *DNS - ) - if len(duo.hooks) == 0 { - node, err = duo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - duo.mutation = mutation - node, err = duo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(duo.hooks) - 1; i >= 0; i-- { - if duo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = duo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, duo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks) } // SaveX is like Save, but panics if an error occurs. 
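Reviewer note (not part of the generated code): DNSUpdateOne gains the same Append*/SetNillable* setters plus a Where method in the hunks above. A hedged sketch of the single-row path follows; UpdateOneID is the stock ent constructor and dns.Type is an equality predicate assumed to mirror the dnsrecord one, neither of which is shown in this diff.

package sketch // illustrative reviewer example, not part of this change

import (
	"context"

	"github.com/google/uuid"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/dns"
)

// addNTPServer updates a single DNS row by ID, guarded by an extra predicate
// (Where on DNSUpdateOne is new in this diff), and appends to ntp_servers.
func addNTPServer(ctx context.Context, client *ent.Client, id uuid.UUID, server string) (*ent.DNS, error) {
	return client.DNS.UpdateOneID(id).      // assumed stock ent constructor
		Where(dns.Type("bind")).            // assumed equality predicate on the "type" field
		AppendNtpServers([]string{server}). // new JSON-append setter from the hunk above
		Save(ctx)                           // returns the updated *ent.DNS via withHooks
}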
@@ -559,16 +544,7 @@ func (duo *DNSUpdateOne) ExecX(ctx context.Context) { } func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dns.Table, - Columns: dns.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(dns.Table, dns.Columns, sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID)) id, ok := duo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DNS.id" for update`)} @@ -593,47 +569,33 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { } } } - if value, ok := duo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldHclID, - }) + if value, ok := duo.mutation.HCLID(); ok { + _spec.SetField(dns.FieldHCLID, field.TypeString, value) } if value, ok := duo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldType, - }) + _spec.SetField(dns.FieldType, field.TypeString, value) } if value, ok := duo.mutation.RootDomain(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dns.FieldRootDomain, - }) + _spec.SetField(dns.FieldRootDomain, field.TypeString, value) } if value, ok := duo.mutation.DNSServers(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldDNSServers, + _spec.SetField(dns.FieldDNSServers, field.TypeJSON, value) + } + if value, ok := duo.mutation.AppendedDNSServers(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, dns.FieldDNSServers, value) }) } if value, ok := duo.mutation.NtpServers(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldNtpServers, + _spec.SetField(dns.FieldNtpServers, field.TypeJSON, value) + } + if value, ok := duo.mutation.AppendedNtpServers(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, dns.FieldNtpServers, value) }) } if value, ok := duo.mutation.Config(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dns.FieldConfig, - }) + _spec.SetField(dns.FieldConfig, field.TypeJSON, value) } if duo.mutation.DNSToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -643,10 +605,7 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -659,10 +618,7 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -678,10 +634,7 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { Columns: dns.DNSToEnvironmentPrimaryKey, Bidi: 
false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -697,10 +650,7 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -713,10 +663,7 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -732,10 +679,7 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { Columns: dns.DNSToCompetitionPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -750,9 +694,10 @@ func (duo *DNSUpdateOne) sqlSave(ctx context.Context) (_node *DNS, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{dns.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + duo.mutation.done = true return _node, nil } diff --git a/ent/dnsrecord.go b/ent/dnsrecord.go index f4057df0..87c0bb80 100755 --- a/ent/dnsrecord.go +++ b/ent/dnsrecord.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/dnsrecord" "github.com/gen0cide/laforge/ent/environment" @@ -18,8 +19,8 @@ type DNSRecord struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Name holds the value of the "name" field. Name string `json:"name,omitempty" hcl:"name,attr"` // Values holds the value of the "values" field. @@ -38,11 +39,13 @@ type DNSRecord struct { // The values are being populated by the DNSRecordQuery when eager-loading is set. Edges DNSRecordEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // DNSRecordToEnvironment holds the value of the DNSRecordToEnvironment edge. HCLDNSRecordToEnvironment *Environment `json:"DNSRecordToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_dns_record *uuid.UUID + selectValues sql.SelectValues } // DNSRecordEdges holds the relations/edges for other nodes in the graph. @@ -52,6 +55,8 @@ type DNSRecordEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. 
+ totalCount [1]map[string]int } // DNSRecordToEnvironmentOrErr returns the DNSRecordToEnvironment value or an error if the edge @@ -59,8 +64,7 @@ type DNSRecordEdges struct { func (e DNSRecordEdges) DNSRecordToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[0] { if e.DNSRecordToEnvironment == nil { - // The edge DNSRecordToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.DNSRecordToEnvironment, nil @@ -69,22 +73,22 @@ func (e DNSRecordEdges) DNSRecordToEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*DNSRecord) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*DNSRecord) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case dnsrecord.FieldValues, dnsrecord.FieldVars, dnsrecord.FieldTags: values[i] = new([]byte) case dnsrecord.FieldDisabled: values[i] = new(sql.NullBool) - case dnsrecord.FieldHclID, dnsrecord.FieldName, dnsrecord.FieldType, dnsrecord.FieldZone: + case dnsrecord.FieldHCLID, dnsrecord.FieldName, dnsrecord.FieldType, dnsrecord.FieldZone: values[i] = new(sql.NullString) case dnsrecord.FieldID: values[i] = new(uuid.UUID) case dnsrecord.ForeignKeys[0]: // environment_environment_to_dns_record values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type DNSRecord", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -92,7 +96,7 @@ func (*DNSRecord) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the DNSRecord fields. -func (dr *DNSRecord) assignValues(columns []string, values []interface{}) error { +func (dr *DNSRecord) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -104,11 +108,11 @@ func (dr *DNSRecord) assignValues(columns []string, values []interface{}) error } else if value != nil { dr.ID = *value } - case dnsrecord.FieldHclID: + case dnsrecord.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - dr.HclID = value.String + dr.HCLID = value.String } case dnsrecord.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -165,31 +169,39 @@ func (dr *DNSRecord) assignValues(columns []string, values []interface{}) error dr.environment_environment_to_dns_record = new(uuid.UUID) *dr.environment_environment_to_dns_record = *value.S.(*uuid.UUID) } + default: + dr.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the DNSRecord. +// This includes values selected through modifiers, order, etc. +func (dr *DNSRecord) Value(name string) (ent.Value, error) { + return dr.selectValues.Get(name) +} + // QueryDNSRecordToEnvironment queries the "DNSRecordToEnvironment" edge of the DNSRecord entity. func (dr *DNSRecord) QueryDNSRecordToEnvironment() *EnvironmentQuery { - return (&DNSRecordClient{config: dr.config}).QueryDNSRecordToEnvironment(dr) + return NewDNSRecordClient(dr.config).QueryDNSRecordToEnvironment(dr) } // Update returns a builder for updating this DNSRecord. 
// Note that you need to call DNSRecord.Unwrap() before calling this method if this DNSRecord // was returned from a transaction, and the transaction was committed or rolled back. func (dr *DNSRecord) Update() *DNSRecordUpdateOne { - return (&DNSRecordClient{config: dr.config}).UpdateOne(dr) + return NewDNSRecordClient(dr.config).UpdateOne(dr) } // Unwrap unwraps the DNSRecord entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (dr *DNSRecord) Unwrap() *DNSRecord { - tx, ok := dr.config.driver.(*txDriver) + _tx, ok := dr.config.driver.(*txDriver) if !ok { panic("ent: DNSRecord is not a transactional entity") } - dr.config.driver = tx.drv + dr.config.driver = _tx.drv return dr } @@ -197,22 +209,29 @@ func (dr *DNSRecord) Unwrap() *DNSRecord { func (dr *DNSRecord) String() string { var builder strings.Builder builder.WriteString("DNSRecord(") - builder.WriteString(fmt.Sprintf("id=%v", dr.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(dr.HclID) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", dr.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(dr.HCLID) + builder.WriteString(", ") + builder.WriteString("name=") builder.WriteString(dr.Name) - builder.WriteString(", values=") + builder.WriteString(", ") + builder.WriteString("values=") builder.WriteString(fmt.Sprintf("%v", dr.Values)) - builder.WriteString(", type=") + builder.WriteString(", ") + builder.WriteString("type=") builder.WriteString(dr.Type) - builder.WriteString(", zone=") + builder.WriteString(", ") + builder.WriteString("zone=") builder.WriteString(dr.Zone) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", dr.Vars)) - builder.WriteString(", disabled=") + builder.WriteString(", ") + builder.WriteString("disabled=") builder.WriteString(fmt.Sprintf("%v", dr.Disabled)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", dr.Tags)) builder.WriteByte(')') return builder.String() @@ -220,9 +239,3 @@ func (dr *DNSRecord) String() string { // DNSRecords is a parsable slice of DNSRecord. type DNSRecords []*DNSRecord - -func (dr DNSRecords) config(cfg config) { - for _i := range dr { - dr[_i].config = cfg - } -} diff --git a/ent/dnsrecord/dnsrecord.go b/ent/dnsrecord/dnsrecord.go index c49ef078..54263811 100755 --- a/ent/dnsrecord/dnsrecord.go +++ b/ent/dnsrecord/dnsrecord.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package dnsrecord import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "dns_record" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldName holds the string denoting the name field in the database. FieldName = "name" // FieldValues holds the string denoting the values field in the database. @@ -43,7 +45,7 @@ const ( // Columns holds all SQL columns for dnsrecord fields. 
var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldName, FieldValues, FieldType, @@ -78,3 +80,50 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the DNSRecord queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByZone orders the results by the zone field. +func ByZone(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldZone, opts...).ToFunc() +} + +// ByDisabled orders the results by the disabled field. +func ByDisabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisabled, opts...).ToFunc() +} + +// ByDNSRecordToEnvironmentField orders the results by DNSRecordToEnvironment field. +func ByDNSRecordToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDNSRecordToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newDNSRecordToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DNSRecordToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DNSRecordToEnvironmentTable, DNSRecordToEnvironmentColumn), + ) +} diff --git a/ent/dnsrecord/where.go b/ent/dnsrecord/where.go index 9e9cc7d0..c9286d69 100755 --- a/ent/dnsrecord/where.go +++ b/ent/dnsrecord/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package dnsrecord @@ -11,578 +11,342 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.DNSRecord(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.DNSRecord(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.DNSRecord(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldEQ(FieldHCLID, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldName, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldType, v)) } // Zone applies equality check predicate on the "zone" field. It's identical to ZoneEQ. func Zone(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldZone, v)) } // Disabled applies equality check predicate on the "disabled" field. It's identical to DisabledEQ. func Disabled(v bool) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldDisabled, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. 
-func HclIDEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. 
+func HCLIDLTE(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.DNSRecord { + return predicate.DNSRecord(sql.FieldContainsFold(FieldHCLID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.DNSRecord(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. 
func NameNotIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.DNSRecord(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.DNSRecord(sql.FieldContainsFold(FieldName, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. 
func TypeIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.DNSRecord(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.DNSRecord(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. func TypeLT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. func TypeHasSuffix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldHasSuffix(FieldType, v)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. func TypeEqualFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.DNSRecord(sql.FieldContainsFold(FieldType, v)) } // ZoneEQ applies the EQ predicate on the "zone" field. 
func ZoneEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldZone, v)) } // ZoneNEQ applies the NEQ predicate on the "zone" field. func ZoneNEQ(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldNEQ(FieldZone, v)) } // ZoneIn applies the In predicate on the "zone" field. func ZoneIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldZone), v...)) - }) + return predicate.DNSRecord(sql.FieldIn(FieldZone, vs...)) } // ZoneNotIn applies the NotIn predicate on the "zone" field. func ZoneNotIn(vs ...string) predicate.DNSRecord { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.DNSRecord(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldZone), v...)) - }) + return predicate.DNSRecord(sql.FieldNotIn(FieldZone, vs...)) } // ZoneGT applies the GT predicate on the "zone" field. func ZoneGT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldGT(FieldZone, v)) } // ZoneGTE applies the GTE predicate on the "zone" field. func ZoneGTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldGTE(FieldZone, v)) } // ZoneLT applies the LT predicate on the "zone" field. func ZoneLT(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldLT(FieldZone, v)) } // ZoneLTE applies the LTE predicate on the "zone" field. func ZoneLTE(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldLTE(FieldZone, v)) } // ZoneContains applies the Contains predicate on the "zone" field. func ZoneContains(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldContains(FieldZone, v)) } // ZoneHasPrefix applies the HasPrefix predicate on the "zone" field. func ZoneHasPrefix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldHasPrefix(FieldZone, v)) } // ZoneHasSuffix applies the HasSuffix predicate on the "zone" field. func ZoneHasSuffix(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldHasSuffix(FieldZone, v)) } // ZoneEqualFold applies the EqualFold predicate on the "zone" field. 
func ZoneEqualFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldEqualFold(FieldZone, v)) } // ZoneContainsFold applies the ContainsFold predicate on the "zone" field. func ZoneContainsFold(v string) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldZone), v)) - }) + return predicate.DNSRecord(sql.FieldContainsFold(FieldZone, v)) } // DisabledEQ applies the EQ predicate on the "disabled" field. func DisabledEQ(v bool) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.DNSRecord(sql.FieldEQ(FieldDisabled, v)) } // DisabledNEQ applies the NEQ predicate on the "disabled" field. func DisabledNEQ(v bool) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDisabled), v)) - }) + return predicate.DNSRecord(sql.FieldNEQ(FieldDisabled, v)) } // HasDNSRecordToEnvironment applies the HasEdge predicate on the "DNSRecordToEnvironment" edge. @@ -590,7 +354,6 @@ func HasDNSRecordToEnvironment() predicate.DNSRecord { return predicate.DNSRecord(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DNSRecordToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, DNSRecordToEnvironmentTable, DNSRecordToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -600,11 +363,7 @@ func HasDNSRecordToEnvironment() predicate.DNSRecord { // HasDNSRecordToEnvironmentWith applies the HasEdge predicate on the "DNSRecordToEnvironment" edge with a given conditions (other predicates). func HasDNSRecordToEnvironmentWith(preds ...predicate.Environment) predicate.DNSRecord { return predicate.DNSRecord(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DNSRecordToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, DNSRecordToEnvironmentTable, DNSRecordToEnvironmentColumn), - ) + step := newDNSRecordToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -615,32 +374,15 @@ func HasDNSRecordToEnvironmentWith(preds ...predicate.Environment) predicate.DNS // And groups predicates with the AND operator between them. func And(predicates ...predicate.DNSRecord) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.DNSRecord(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.DNSRecord) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.DNSRecord(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.DNSRecord) predicate.DNSRecord { - return predicate.DNSRecord(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.DNSRecord(sql.NotPredicates(p)) } diff --git a/ent/dnsrecord_create.go b/ent/dnsrecord_create.go index 5ba42677..bc810cd7 100755 --- a/ent/dnsrecord_create.go +++ b/ent/dnsrecord_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -21,9 +21,9 @@ type DNSRecordCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (drc *DNSRecordCreate) SetHclID(s string) *DNSRecordCreate { - drc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (drc *DNSRecordCreate) SetHCLID(s string) *DNSRecordCreate { + drc.mutation.SetHCLID(s) return drc } @@ -109,44 +109,8 @@ func (drc *DNSRecordCreate) Mutation() *DNSRecordMutation { // Save creates the DNSRecord in the database. func (drc *DNSRecordCreate) Save(ctx context.Context) (*DNSRecord, error) { - var ( - err error - node *DNSRecord - ) drc.defaults() - if len(drc.hooks) == 0 { - if err = drc.check(); err != nil { - return nil, err - } - node, err = drc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSRecordMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = drc.check(); err != nil { - return nil, err - } - drc.mutation = mutation - if node, err = drc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(drc.hooks) - 1; i >= 0; i-- { - if drc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = drc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, drc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, drc.sqlSave, drc.mutation, drc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -181,7 +145,7 @@ func (drc *DNSRecordCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (drc *DNSRecordCreate) check() error { - if _, ok := drc.mutation.HclID(); !ok { + if _, ok := drc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "DNSRecord.hcl_id"`)} } if _, ok := drc.mutation.Name(); !ok { @@ -209,10 +173,13 @@ func (drc *DNSRecordCreate) check() error { } func (drc *DNSRecordCreate) sqlSave(ctx context.Context) (*DNSRecord, error) { + if err := drc.check(); err != nil { + return nil, err + } _node, _spec := drc.createSpec() if err := sqlgraph.CreateNode(ctx, drc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -223,86 +190,50 @@ func (drc *DNSRecordCreate) sqlSave(ctx context.Context) (*DNSRecord, error) { return nil, err } } + drc.mutation.id = &_node.ID + drc.mutation.done = true return _node, nil } func (drc *DNSRecordCreate) createSpec() (*DNSRecord, *sqlgraph.CreateSpec) { var ( _node = &DNSRecord{config: drc.config} - _spec = &sqlgraph.CreateSpec{ - Table: dnsrecord.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(dnsrecord.Table, sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID)) ) if id, ok := drc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := drc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldHclID, - }) - _node.HclID = value + if value, ok := drc.mutation.HCLID(); ok { + _spec.SetField(dnsrecord.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := drc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: 
field.TypeString, - Value: value, - Column: dnsrecord.FieldName, - }) + _spec.SetField(dnsrecord.FieldName, field.TypeString, value) _node.Name = value } if value, ok := drc.mutation.Values(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldValues, - }) + _spec.SetField(dnsrecord.FieldValues, field.TypeJSON, value) _node.Values = value } if value, ok := drc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldType, - }) + _spec.SetField(dnsrecord.FieldType, field.TypeString, value) _node.Type = value } if value, ok := drc.mutation.Zone(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldZone, - }) + _spec.SetField(dnsrecord.FieldZone, field.TypeString, value) _node.Zone = value } if value, ok := drc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldVars, - }) + _spec.SetField(dnsrecord.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := drc.mutation.Disabled(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: dnsrecord.FieldDisabled, - }) + _spec.SetField(dnsrecord.FieldDisabled, field.TypeBool, value) _node.Disabled = value } if value, ok := drc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldTags, - }) + _spec.SetField(dnsrecord.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := drc.mutation.DNSRecordToEnvironmentIDs(); len(nodes) > 0 { @@ -313,10 +244,7 @@ func (drc *DNSRecordCreate) createSpec() (*DNSRecord, *sqlgraph.CreateSpec) { Columns: []string{dnsrecord.DNSRecordToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -331,11 +259,15 @@ func (drc *DNSRecordCreate) createSpec() (*DNSRecord, *sqlgraph.CreateSpec) { // DNSRecordCreateBulk is the builder for creating many DNSRecord entities in bulk. type DNSRecordCreateBulk struct { config + err error builders []*DNSRecordCreate } // Save creates the DNSRecord entities in the database. func (drcb *DNSRecordCreateBulk) Save(ctx context.Context) ([]*DNSRecord, error) { + if drcb.err != nil { + return nil, drcb.err + } specs := make([]*sqlgraph.CreateSpec, len(drcb.builders)) nodes := make([]*DNSRecord, len(drcb.builders)) mutators := make([]Mutator, len(drcb.builders)) @@ -352,8 +284,8 @@ func (drcb *DNSRecordCreateBulk) Save(ctx context.Context) ([]*DNSRecord, error) return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, drcb.builders[i+1].mutation) } else { @@ -361,7 +293,7 @@ func (drcb *DNSRecordCreateBulk) Save(ctx context.Context) ([]*DNSRecord, error) // Invoke the actual operation on the latest mutation in the chain. 
if err = sqlgraph.BatchCreate(ctx, drcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/dnsrecord_delete.go b/ent/dnsrecord_delete.go index d3c6fabf..0cdc7cc9 100755 --- a/ent/dnsrecord_delete.go +++ b/ent/dnsrecord_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (drd *DNSRecordDelete) Where(ps ...predicate.DNSRecord) *DNSRecordDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (drd *DNSRecordDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(drd.hooks) == 0 { - affected, err = drd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSRecordMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - drd.mutation = mutation - affected, err = drd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(drd.hooks) - 1; i >= 0; i-- { - if drd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = drd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, drd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, drd.sqlExec, drd.mutation, drd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (drd *DNSRecordDelete) ExecX(ctx context.Context) int { } func (drd *DNSRecordDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dnsrecord.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(dnsrecord.Table, sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID)) if ps := drd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (drd *DNSRecordDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, drd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, drd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + drd.mutation.done = true + return affected, err } // DNSRecordDeleteOne is the builder for deleting a single DNSRecord entity. @@ -92,6 +61,12 @@ type DNSRecordDeleteOne struct { drd *DNSRecordDelete } +// Where appends a list predicates to the DNSRecordDelete builder. +func (drdo *DNSRecordDeleteOne) Where(ps ...predicate.DNSRecord) *DNSRecordDeleteOne { + drdo.drd.mutation.Where(ps...) + return drdo +} + // Exec executes the deletion query. func (drdo *DNSRecordDeleteOne) Exec(ctx context.Context) error { n, err := drdo.drd.Exec(ctx) @@ -107,5 +82,7 @@ func (drdo *DNSRecordDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. 
func (drdo *DNSRecordDeleteOne) ExecX(ctx context.Context) { - drdo.drd.ExecX(ctx) + if err := drdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/dnsrecord_query.go b/ent/dnsrecord_query.go index dea2d81e..0305760a 100755 --- a/ent/dnsrecord_query.go +++ b/ent/dnsrecord_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // DNSRecordQuery is the builder for querying DNSRecord entities. type DNSRecordQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.DNSRecord - // eager-loading edges. + ctx *QueryContext + order []dnsrecord.OrderOption + inters []Interceptor + predicates []predicate.DNSRecord withDNSRecordToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*DNSRecord) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (drq *DNSRecordQuery) Where(ps ...predicate.DNSRecord) *DNSRecordQuery { return drq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (drq *DNSRecordQuery) Limit(limit int) *DNSRecordQuery { - drq.limit = &limit + drq.ctx.Limit = &limit return drq } -// Offset adds an offset step to the query. +// Offset to start from. func (drq *DNSRecordQuery) Offset(offset int) *DNSRecordQuery { - drq.offset = &offset + drq.ctx.Offset = &offset return drq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (drq *DNSRecordQuery) Unique(unique bool) *DNSRecordQuery { - drq.unique = &unique + drq.ctx.Unique = &unique return drq } -// Order adds an order step to the query. -func (drq *DNSRecordQuery) Order(o ...OrderFunc) *DNSRecordQuery { +// Order specifies how the records should be ordered. +func (drq *DNSRecordQuery) Order(o ...dnsrecord.OrderOption) *DNSRecordQuery { drq.order = append(drq.order, o...) return drq } // QueryDNSRecordToEnvironment chains the current query on the "DNSRecordToEnvironment" edge. func (drq *DNSRecordQuery) QueryDNSRecordToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: drq.config} + query := (&EnvironmentClient{config: drq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := drq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (drq *DNSRecordQuery) QueryDNSRecordToEnvironment() *EnvironmentQuery { // First returns the first DNSRecord entity from the query. // Returns a *NotFoundError when no DNSRecord was found. func (drq *DNSRecordQuery) First(ctx context.Context) (*DNSRecord, error) { - nodes, err := drq.Limit(1).All(ctx) + nodes, err := drq.Limit(1).All(setContextOp(ctx, drq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (drq *DNSRecordQuery) FirstX(ctx context.Context) *DNSRecord { // Returns a *NotFoundError when no DNSRecord ID was found. 
func (drq *DNSRecordQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = drq.Limit(1).IDs(ctx); err != nil { + if ids, err = drq.Limit(1).IDs(setContextOp(ctx, drq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (drq *DNSRecordQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one DNSRecord entity is found. // Returns a *NotFoundError when no DNSRecord entities are found. func (drq *DNSRecordQuery) Only(ctx context.Context) (*DNSRecord, error) { - nodes, err := drq.Limit(2).All(ctx) + nodes, err := drq.Limit(2).All(setContextOp(ctx, drq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (drq *DNSRecordQuery) OnlyX(ctx context.Context) *DNSRecord { // Returns a *NotFoundError when no entities are found. func (drq *DNSRecordQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = drq.Limit(2).IDs(ctx); err != nil { + if ids, err = drq.Limit(2).IDs(setContextOp(ctx, drq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (drq *DNSRecordQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of DNSRecords. func (drq *DNSRecordQuery) All(ctx context.Context) ([]*DNSRecord, error) { + ctx = setContextOp(ctx, drq.ctx, "All") if err := drq.prepareQuery(ctx); err != nil { return nil, err } - return drq.sqlAll(ctx) + qr := querierAll[[]*DNSRecord, *DNSRecordQuery]() + return withInterceptors[[]*DNSRecord](ctx, drq, qr, drq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (drq *DNSRecordQuery) AllX(ctx context.Context) []*DNSRecord { } // IDs executes the query and returns a list of DNSRecord IDs. -func (drq *DNSRecordQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := drq.Select(dnsrecord.FieldID).Scan(ctx, &ids); err != nil { +func (drq *DNSRecordQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if drq.ctx.Unique == nil && drq.path != nil { + drq.Unique(true) + } + ctx = setContextOp(ctx, drq.ctx, "IDs") + if err = drq.Select(dnsrecord.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (drq *DNSRecordQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (drq *DNSRecordQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, drq.ctx, "Count") if err := drq.prepareQuery(ctx); err != nil { return 0, err } - return drq.sqlCount(ctx) + return withInterceptors[int](ctx, drq, querierCount[*DNSRecordQuery](), drq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (drq *DNSRecordQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (drq *DNSRecordQuery) Exist(ctx context.Context) (bool, error) { - if err := drq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, drq.ctx, "Exist") + switch _, err := drq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return drq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -264,22 +273,21 @@ func (drq *DNSRecordQuery) Clone() *DNSRecordQuery { } return &DNSRecordQuery{ config: drq.config, - limit: drq.limit, - offset: drq.offset, - order: append([]OrderFunc{}, drq.order...), + ctx: drq.ctx.Clone(), + order: append([]dnsrecord.OrderOption{}, drq.order...), + inters: append([]Interceptor{}, drq.inters...), predicates: append([]predicate.DNSRecord{}, drq.predicates...), withDNSRecordToEnvironment: drq.withDNSRecordToEnvironment.Clone(), // clone intermediate query. - sql: drq.sql.Clone(), - path: drq.path, - unique: drq.unique, + sql: drq.sql.Clone(), + path: drq.path, } } // WithDNSRecordToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "DNSRecordToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (drq *DNSRecordQuery) WithDNSRecordToEnvironment(opts ...func(*EnvironmentQuery)) *DNSRecordQuery { - query := &EnvironmentQuery{config: drq.config} + query := (&EnvironmentClient{config: drq.config}).Query() for _, opt := range opts { opt(query) } @@ -293,25 +301,21 @@ func (drq *DNSRecordQuery) WithDNSRecordToEnvironment(opts ...func(*EnvironmentQ // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.DNSRecord.Query(). -// GroupBy(dnsrecord.FieldHclID). +// GroupBy(dnsrecord.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (drq *DNSRecordQuery) GroupBy(field string, fields ...string) *DNSRecordGroupBy { - group := &DNSRecordGroupBy{config: drq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := drq.prepareQuery(ctx); err != nil { - return nil, err - } - return drq.sqlQuery(ctx), nil - } - return group + drq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DNSRecordGroupBy{build: drq} + grbuild.flds = &drq.ctx.Fields + grbuild.label = dnsrecord.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -320,20 +324,37 @@ func (drq *DNSRecordQuery) GroupBy(field string, fields ...string) *DNSRecordGro // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.DNSRecord.Query(). -// Select(dnsrecord.FieldHclID). +// Select(dnsrecord.FieldHCLID). // Scan(ctx, &v) -// func (drq *DNSRecordQuery) Select(fields ...string) *DNSRecordSelect { - drq.fields = append(drq.fields, fields...) - return &DNSRecordSelect{DNSRecordQuery: drq} + drq.ctx.Fields = append(drq.ctx.Fields, fields...) + sbuild := &DNSRecordSelect{DNSRecordQuery: drq} + sbuild.label = dnsrecord.Label + sbuild.flds, sbuild.scan = &drq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DNSRecordSelect configured with the given aggregations. +func (drq *DNSRecordQuery) Aggregate(fns ...AggregateFunc) *DNSRecordSelect { + return drq.Select().Aggregate(fns...) 
} func (drq *DNSRecordQuery) prepareQuery(ctx context.Context) error { - for _, f := range drq.fields { + for _, inter := range drq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, drq); err != nil { + return err + } + } + } + for _, f := range drq.ctx.Fields { if !dnsrecord.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (drq *DNSRecordQuery) prepareQuery(ctx context.Context) error { return nil } -func (drq *DNSRecordQuery) sqlAll(ctx context.Context) ([]*DNSRecord, error) { +func (drq *DNSRecordQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DNSRecord, error) { var ( nodes = []*DNSRecord{} withFKs = drq.withFKs @@ -363,92 +384,95 @@ func (drq *DNSRecordQuery) sqlAll(ctx context.Context) ([]*DNSRecord, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, dnsrecord.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*DNSRecord).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &DNSRecord{config: drq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(drq.modifiers) > 0 { + _spec.Modifiers = drq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, drq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := drq.withDNSRecordToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*DNSRecord) - for i := range nodes { - if nodes[i].environment_environment_to_dns_record == nil { - continue - } - fk := *nodes[i].environment_environment_to_dns_record - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := drq.loadDNSRecordToEnvironment(ctx, query, nodes, nil, + func(n *DNSRecord, e *Environment) { n.Edges.DNSRecordToEnvironment = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_dns_record" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.DNSRecordToEnvironment = n - } + } + for i := range drq.loadTotal { + if err := drq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (drq *DNSRecordQuery) sqlCount(ctx context.Context) (int, error) { - _spec := drq.querySpec() - _spec.Node.Columns = drq.fields - if len(drq.fields) > 0 { - _spec.Unique = drq.unique != nil && *drq.unique +func (drq *DNSRecordQuery) loadDNSRecordToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*DNSRecord, init func(*DNSRecord), assign func(*DNSRecord, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*DNSRecord) + for i := range nodes { + if 
nodes[i].environment_environment_to_dns_record == nil { + continue + } + fk := *nodes[i].environment_environment_to_dns_record + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, drq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_dns_record" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (drq *DNSRecordQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := drq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (drq *DNSRecordQuery) sqlCount(ctx context.Context) (int, error) { + _spec := drq.querySpec() + if len(drq.modifiers) > 0 { + _spec.Modifiers = drq.modifiers } - return n > 0, nil + _spec.Node.Columns = drq.ctx.Fields + if len(drq.ctx.Fields) > 0 { + _spec.Unique = drq.ctx.Unique != nil && *drq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, drq.driver, _spec) } func (drq *DNSRecordQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: dnsrecord.Table, - Columns: dnsrecord.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, - }, - From: drq.sql, - Unique: true, - } - if unique := drq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(dnsrecord.Table, dnsrecord.Columns, sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID)) + _spec.From = drq.sql + if unique := drq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if drq.path != nil { + _spec.Unique = true } - if fields := drq.fields; len(fields) > 0 { + if fields := drq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, dnsrecord.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (drq *DNSRecordQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := drq.limit; limit != nil { + if limit := drq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := drq.offset; offset != nil { + if offset := drq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := drq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (drq *DNSRecordQuery) querySpec() *sqlgraph.QuerySpec { func (drq *DNSRecordQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(drq.driver.Dialect()) t1 := builder.Table(dnsrecord.Table) - columns := drq.fields + columns := drq.ctx.Fields if len(columns) == 0 { columns = dnsrecord.Columns } @@ -492,7 +516,7 @@ func (drq *DNSRecordQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = drq.sql selector.Select(selector.Columns(columns...)...) } - if drq.unique != nil && *drq.unique { + if drq.ctx.Unique != nil && *drq.ctx.Unique { selector.Distinct() } for _, p := range drq.predicates { @@ -501,12 +525,12 @@ func (drq *DNSRecordQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range drq.order { p(selector) } - if offset := drq.offset; offset != nil { + if offset := drq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := drq.limit; limit != nil { + if limit := drq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (drq *DNSRecordQuery) sqlQuery(ctx context.Context) *sql.Selector { // DNSRecordGroupBy is the group-by builder for DNSRecord entities. type DNSRecordGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *DNSRecordQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (drgb *DNSRecordGroupBy) Aggregate(fns ...AggregateFunc) *DNSRecordGroupBy return drgb } -// Scan applies the group-by query and scans the result into the given value. -func (drgb *DNSRecordGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := drgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (drgb *DNSRecordGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, drgb.build.ctx, "GroupBy") + if err := drgb.build.prepareQuery(ctx); err != nil { return err } - drgb.sql = query - return drgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := drgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(drgb.fields) > 1 { - return nil, errors.New("ent: DNSRecordGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := drgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) StringsX(ctx context.Context) []string { - v, err := drgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = drgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) StringX(ctx context.Context) string { - v, err := drgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(drgb.fields) > 1 { - return nil, errors.New("ent: DNSRecordGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := drgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. 
-func (drgb *DNSRecordGroupBy) IntsX(ctx context.Context) []int { - v, err := drgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = drgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*DNSRecordQuery, *DNSRecordGroupBy](ctx, drgb.build, drgb, drgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) IntX(ctx context.Context) int { - v, err := drgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(drgb.fields) > 1 { - return nil, errors.New("ent: DNSRecordGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := drgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := drgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = drgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) Float64X(ctx context.Context) float64 { - v, err := drgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(drgb.fields) > 1 { - return nil, errors.New("ent: DNSRecordGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := drgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (drgb *DNSRecordGroupBy) BoolsX(ctx context.Context) []bool { - v, err := drgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (drgb *DNSRecordGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = drgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. 
-func (drgb *DNSRecordGroupBy) BoolX(ctx context.Context) bool { - v, err := drgb.Bool(ctx) - if err != nil { - panic(err) +func (drgb *DNSRecordGroupBy) sqlScan(ctx context.Context, root *DNSRecordQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(drgb.fns)) + for _, fn := range drgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (drgb *DNSRecordGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range drgb.fields { - if !dnsrecord.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*drgb.flds)+len(drgb.fns)) + for _, f := range *drgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := drgb.sqlQuery() + selector.GroupBy(selector.Columns(*drgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := drgb.driver.Query(ctx, query, args, rows); err != nil { + if err := drgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (drgb *DNSRecordGroupBy) sqlQuery() *sql.Selector { - selector := drgb.sql.Select() - aggregation := make([]string, 0, len(drgb.fns)) - for _, fn := range drgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(drgb.fields)+len(drgb.fns)) - for _, f := range drgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(drgb.fields...)...) -} - // DNSRecordSelect is the builder for selecting fields of DNSRecord entities. type DNSRecordSelect struct { *DNSRecordQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (drs *DNSRecordSelect) Aggregate(fns ...AggregateFunc) *DNSRecordSelect { + drs.fns = append(drs.fns, fns...) + return drs } // Scan applies the selector query and scans the result into the given value. -func (drs *DNSRecordSelect) Scan(ctx context.Context, v interface{}) error { +func (drs *DNSRecordSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, drs.ctx, "Select") if err := drs.prepareQuery(ctx); err != nil { return err } - drs.sql = drs.DNSRecordQuery.sqlQuery(ctx) - return drs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (drs *DNSRecordSelect) ScanX(ctx context.Context, v interface{}) { - if err := drs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Strings(ctx context.Context) ([]string, error) { - if len(drs.fields) > 1 { - return nil, errors.New("ent: DNSRecordSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := drs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (drs *DNSRecordSelect) StringsX(ctx context.Context) []string { - v, err := drs.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*DNSRecordQuery, *DNSRecordSelect](ctx, drs.DNSRecordQuery, drs, drs.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = drs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (drs *DNSRecordSelect) StringX(ctx context.Context) string { - v, err := drs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Ints(ctx context.Context) ([]int, error) { - if len(drs.fields) > 1 { - return nil, errors.New("ent: DNSRecordSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := drs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (drs *DNSRecordSelect) IntsX(ctx context.Context) []int { - v, err := drs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = drs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (drs *DNSRecordSelect) IntX(ctx context.Context) int { - v, err := drs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(drs.fields) > 1 { - return nil, errors.New("ent: DNSRecordSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := drs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (drs *DNSRecordSelect) Float64sX(ctx context.Context) []float64 { - v, err := drs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = drs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
-func (drs *DNSRecordSelect) Float64X(ctx context.Context) float64 { - v, err := drs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Bools(ctx context.Context) ([]bool, error) { - if len(drs.fields) > 1 { - return nil, errors.New("ent: DNSRecordSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := drs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (drs *DNSRecordSelect) BoolsX(ctx context.Context) []bool { - v, err := drs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (drs *DNSRecordSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = drs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{dnsrecord.Label} - default: - err = fmt.Errorf("ent: DNSRecordSelect.Bools returned %d results when one was expected", len(v)) +func (drs *DNSRecordSelect) sqlScan(ctx context.Context, root *DNSRecordQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(drs.fns)) + for _, fn := range drs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (drs *DNSRecordSelect) BoolX(ctx context.Context) bool { - v, err := drs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*drs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (drs *DNSRecordSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := drs.sql.Query() + query, args := selector.Query() if err := drs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/dnsrecord_update.go b/ent/dnsrecord_update.go index 8e0d3320..12626210 100755 --- a/ent/dnsrecord_update.go +++ b/ent/dnsrecord_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/dnsrecord" "github.com/gen0cide/laforge/ent/environment" @@ -29,9 +30,17 @@ func (dru *DNSRecordUpdate) Where(ps ...predicate.DNSRecord) *DNSRecordUpdate { return dru } -// SetHclID sets the "hcl_id" field. -func (dru *DNSRecordUpdate) SetHclID(s string) *DNSRecordUpdate { - dru.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (dru *DNSRecordUpdate) SetHCLID(s string) *DNSRecordUpdate { + dru.mutation.SetHCLID(s) + return dru +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (dru *DNSRecordUpdate) SetNillableHCLID(s *string) *DNSRecordUpdate { + if s != nil { + dru.SetHCLID(*s) + } return dru } @@ -41,24 +50,54 @@ func (dru *DNSRecordUpdate) SetName(s string) *DNSRecordUpdate { return dru } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (dru *DNSRecordUpdate) SetNillableName(s *string) *DNSRecordUpdate { + if s != nil { + dru.SetName(*s) + } + return dru +} + // SetValues sets the "values" field. func (dru *DNSRecordUpdate) SetValues(s []string) *DNSRecordUpdate { dru.mutation.SetValues(s) return dru } +// AppendValues appends s to the "values" field. +func (dru *DNSRecordUpdate) AppendValues(s []string) *DNSRecordUpdate { + dru.mutation.AppendValues(s) + return dru +} + // SetType sets the "type" field. func (dru *DNSRecordUpdate) SetType(s string) *DNSRecordUpdate { dru.mutation.SetType(s) return dru } +// SetNillableType sets the "type" field if the given value is not nil. +func (dru *DNSRecordUpdate) SetNillableType(s *string) *DNSRecordUpdate { + if s != nil { + dru.SetType(*s) + } + return dru +} + // SetZone sets the "zone" field. func (dru *DNSRecordUpdate) SetZone(s string) *DNSRecordUpdate { dru.mutation.SetZone(s) return dru } +// SetNillableZone sets the "zone" field if the given value is not nil. +func (dru *DNSRecordUpdate) SetNillableZone(s *string) *DNSRecordUpdate { + if s != nil { + dru.SetZone(*s) + } + return dru +} + // SetVars sets the "vars" field. func (dru *DNSRecordUpdate) SetVars(m map[string]string) *DNSRecordUpdate { dru.mutation.SetVars(m) @@ -71,6 +110,14 @@ func (dru *DNSRecordUpdate) SetDisabled(b bool) *DNSRecordUpdate { return dru } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (dru *DNSRecordUpdate) SetNillableDisabled(b *bool) *DNSRecordUpdate { + if b != nil { + dru.SetDisabled(*b) + } + return dru +} + // SetTags sets the "tags" field. func (dru *DNSRecordUpdate) SetTags(m map[string]string) *DNSRecordUpdate { dru.mutation.SetTags(m) @@ -109,34 +156,7 @@ func (dru *DNSRecordUpdate) ClearDNSRecordToEnvironment() *DNSRecordUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (dru *DNSRecordUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(dru.hooks) == 0 { - affected, err = dru.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSRecordMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - dru.mutation = mutation - affected, err = dru.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(dru.hooks) - 1; i >= 0; i-- { - if dru.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dru.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dru.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, dru.sqlSave, dru.mutation, dru.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -162,16 +182,7 @@ func (dru *DNSRecordUpdate) ExecX(ctx context.Context) { } func (dru *DNSRecordUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dnsrecord.Table, - Columns: dnsrecord.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(dnsrecord.Table, dnsrecord.Columns, sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID)) if ps := dru.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -179,61 +190,34 @@ func (dru *DNSRecordUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := dru.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldHclID, - }) + if value, ok := dru.mutation.HCLID(); ok { + _spec.SetField(dnsrecord.FieldHCLID, field.TypeString, value) } if value, ok := dru.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldName, - }) + _spec.SetField(dnsrecord.FieldName, field.TypeString, value) } if value, ok := dru.mutation.Values(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldValues, + _spec.SetField(dnsrecord.FieldValues, field.TypeJSON, value) + } + if value, ok := dru.mutation.AppendedValues(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, dnsrecord.FieldValues, value) }) } if value, ok := dru.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldType, - }) + _spec.SetField(dnsrecord.FieldType, field.TypeString, value) } if value, ok := dru.mutation.Zone(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldZone, - }) + _spec.SetField(dnsrecord.FieldZone, field.TypeString, value) } if value, ok := dru.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldVars, - }) + _spec.SetField(dnsrecord.FieldVars, field.TypeJSON, value) } if value, ok := dru.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: dnsrecord.FieldDisabled, - }) + _spec.SetField(dnsrecord.FieldDisabled, field.TypeBool, value) } if value, ok := dru.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldTags, - }) + _spec.SetField(dnsrecord.FieldTags, field.TypeJSON, value) } if dru.mutation.DNSRecordToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -243,10 +227,7 @@ func (dru *DNSRecordUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{dnsrecord.DNSRecordToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -259,10 +240,7 @@ func (dru *DNSRecordUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: 
[]string{dnsrecord.DNSRecordToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -274,10 +252,11 @@ func (dru *DNSRecordUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{dnsrecord.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + dru.mutation.done = true return n, nil } @@ -289,9 +268,17 @@ type DNSRecordUpdateOne struct { mutation *DNSRecordMutation } -// SetHclID sets the "hcl_id" field. -func (druo *DNSRecordUpdateOne) SetHclID(s string) *DNSRecordUpdateOne { - druo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (druo *DNSRecordUpdateOne) SetHCLID(s string) *DNSRecordUpdateOne { + druo.mutation.SetHCLID(s) + return druo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (druo *DNSRecordUpdateOne) SetNillableHCLID(s *string) *DNSRecordUpdateOne { + if s != nil { + druo.SetHCLID(*s) + } return druo } @@ -301,24 +288,54 @@ func (druo *DNSRecordUpdateOne) SetName(s string) *DNSRecordUpdateOne { return druo } +// SetNillableName sets the "name" field if the given value is not nil. +func (druo *DNSRecordUpdateOne) SetNillableName(s *string) *DNSRecordUpdateOne { + if s != nil { + druo.SetName(*s) + } + return druo +} + // SetValues sets the "values" field. func (druo *DNSRecordUpdateOne) SetValues(s []string) *DNSRecordUpdateOne { druo.mutation.SetValues(s) return druo } +// AppendValues appends s to the "values" field. +func (druo *DNSRecordUpdateOne) AppendValues(s []string) *DNSRecordUpdateOne { + druo.mutation.AppendValues(s) + return druo +} + // SetType sets the "type" field. func (druo *DNSRecordUpdateOne) SetType(s string) *DNSRecordUpdateOne { druo.mutation.SetType(s) return druo } +// SetNillableType sets the "type" field if the given value is not nil. +func (druo *DNSRecordUpdateOne) SetNillableType(s *string) *DNSRecordUpdateOne { + if s != nil { + druo.SetType(*s) + } + return druo +} + // SetZone sets the "zone" field. func (druo *DNSRecordUpdateOne) SetZone(s string) *DNSRecordUpdateOne { druo.mutation.SetZone(s) return druo } +// SetNillableZone sets the "zone" field if the given value is not nil. +func (druo *DNSRecordUpdateOne) SetNillableZone(s *string) *DNSRecordUpdateOne { + if s != nil { + druo.SetZone(*s) + } + return druo +} + // SetVars sets the "vars" field. func (druo *DNSRecordUpdateOne) SetVars(m map[string]string) *DNSRecordUpdateOne { druo.mutation.SetVars(m) @@ -331,6 +348,14 @@ func (druo *DNSRecordUpdateOne) SetDisabled(b bool) *DNSRecordUpdateOne { return druo } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (druo *DNSRecordUpdateOne) SetNillableDisabled(b *bool) *DNSRecordUpdateOne { + if b != nil { + druo.SetDisabled(*b) + } + return druo +} + // SetTags sets the "tags" field. func (druo *DNSRecordUpdateOne) SetTags(m map[string]string) *DNSRecordUpdateOne { druo.mutation.SetTags(m) @@ -367,6 +392,12 @@ func (druo *DNSRecordUpdateOne) ClearDNSRecordToEnvironment() *DNSRecordUpdateOn return druo } +// Where appends a list predicates to the DNSRecordUpdate builder. 
+func (druo *DNSRecordUpdateOne) Where(ps ...predicate.DNSRecord) *DNSRecordUpdateOne { + druo.mutation.Where(ps...) + return druo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (druo *DNSRecordUpdateOne) Select(field string, fields ...string) *DNSRecordUpdateOne { @@ -376,34 +407,7 @@ func (druo *DNSRecordUpdateOne) Select(field string, fields ...string) *DNSRecor // Save executes the query and returns the updated DNSRecord entity. func (druo *DNSRecordUpdateOne) Save(ctx context.Context) (*DNSRecord, error) { - var ( - err error - node *DNSRecord - ) - if len(druo.hooks) == 0 { - node, err = druo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DNSRecordMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - druo.mutation = mutation - node, err = druo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(druo.hooks) - 1; i >= 0; i-- { - if druo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = druo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, druo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, druo.sqlSave, druo.mutation, druo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -429,16 +433,7 @@ func (druo *DNSRecordUpdateOne) ExecX(ctx context.Context) { } func (druo *DNSRecordUpdateOne) sqlSave(ctx context.Context) (_node *DNSRecord, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dnsrecord.Table, - Columns: dnsrecord.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(dnsrecord.Table, dnsrecord.Columns, sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID)) id, ok := druo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DNSRecord.id" for update`)} @@ -463,61 +458,34 @@ func (druo *DNSRecordUpdateOne) sqlSave(ctx context.Context) (_node *DNSRecord, } } } - if value, ok := druo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldHclID, - }) + if value, ok := druo.mutation.HCLID(); ok { + _spec.SetField(dnsrecord.FieldHCLID, field.TypeString, value) } if value, ok := druo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldName, - }) + _spec.SetField(dnsrecord.FieldName, field.TypeString, value) } if value, ok := druo.mutation.Values(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldValues, + _spec.SetField(dnsrecord.FieldValues, field.TypeJSON, value) + } + if value, ok := druo.mutation.AppendedValues(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, dnsrecord.FieldValues, value) }) } if value, ok := druo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldType, - }) + _spec.SetField(dnsrecord.FieldType, field.TypeString, value) } if value, ok := druo.mutation.Zone(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: dnsrecord.FieldZone, - }) + _spec.SetField(dnsrecord.FieldZone, field.TypeString, value) } if value, ok := druo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldVars, - }) + _spec.SetField(dnsrecord.FieldVars, field.TypeJSON, value) } if value, ok := druo.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: dnsrecord.FieldDisabled, - }) + _spec.SetField(dnsrecord.FieldDisabled, field.TypeBool, value) } if value, ok := druo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: dnsrecord.FieldTags, - }) + _spec.SetField(dnsrecord.FieldTags, field.TypeJSON, value) } if druo.mutation.DNSRecordToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -527,10 +495,7 @@ func (druo *DNSRecordUpdateOne) sqlSave(ctx context.Context) (_node *DNSRecord, Columns: []string{dnsrecord.DNSRecordToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -543,10 +508,7 @@ func (druo *DNSRecordUpdateOne) sqlSave(ctx context.Context) (_node *DNSRecord, Columns: []string{dnsrecord.DNSRecordToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -561,9 +523,10 @@ func (druo *DNSRecordUpdateOne) sqlSave(ctx context.Context) (_node *DNSRecord, if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{dnsrecord.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + druo.mutation.done = true return _node, nil } diff --git a/ent/ent.go b/ent/ent.go index c803a37f..4c8029ba 100755 --- a/ent/ent.go +++ b/ent/ent.go @@ -1,13 +1,17 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( + "context" "errors" "fmt" + "reflect" + "sync" "entgo.io/ent" "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/gen0cide/laforge/ent/adhocplan" "github.com/gen0cide/laforge/ent/agentstatus" "github.com/gen0cide/laforge/ent/agenttask" @@ -49,80 +53,109 @@ import ( // ent aliases to avoid import conflicts in user's code. type ( - Op = ent.Op - Hook = ent.Hook - Value = ent.Value - Query = ent.Query - Policy = ent.Policy - Mutator = ent.Mutator - Mutation = ent.Mutation - MutateFunc = ent.MutateFunc + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc ) +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. 
+func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + // OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. type OrderFunc func(*sql.Selector) -// columnChecker returns a function indicates if the column exists in the given column. -func columnChecker(table string) func(string) error { - checks := map[string]func(string) bool{ - adhocplan.Table: adhocplan.ValidColumn, - agentstatus.Table: agentstatus.ValidColumn, - agenttask.Table: agenttask.ValidColumn, - ansible.Table: ansible.ValidColumn, - authuser.Table: authuser.ValidColumn, - build.Table: build.ValidColumn, - buildcommit.Table: buildcommit.ValidColumn, - command.Table: command.ValidColumn, - competition.Table: competition.ValidColumn, - dns.Table: dns.ValidColumn, - dnsrecord.Table: dnsrecord.ValidColumn, - disk.Table: disk.ValidColumn, - environment.Table: environment.ValidColumn, - filedelete.Table: filedelete.ValidColumn, - filedownload.Table: filedownload.ValidColumn, - fileextract.Table: fileextract.ValidColumn, - finding.Table: finding.ValidColumn, - ginfilemiddleware.Table: ginfilemiddleware.ValidColumn, - host.Table: host.ValidColumn, - hostdependency.Table: hostdependency.ValidColumn, - identity.Table: identity.ValidColumn, - includednetwork.Table: includednetwork.ValidColumn, - network.Table: network.ValidColumn, - plan.Table: plan.ValidColumn, - plandiff.Table: plandiff.ValidColumn, - provisionedhost.Table: provisionedhost.ValidColumn, - provisionednetwork.Table: provisionednetwork.ValidColumn, - provisioningstep.Table: provisioningstep.ValidColumn, - repocommit.Table: repocommit.ValidColumn, - repository.Table: repository.ValidColumn, - script.Table: script.ValidColumn, - servertask.Table: servertask.ValidColumn, - status.Table: status.ValidColumn, - tag.Table: tag.ValidColumn, - team.Table: team.ValidColumn, - token.Table: token.ValidColumn, - user.Table: user.ValidColumn, - } - check, ok := checks[table] - if !ok { - return func(string) error { - return fmt.Errorf("unknown table %q", table) - } - } - return func(column string) error { - if !check(column) { - return fmt.Errorf("unknown column %q for table %q", column, table) - } - return nil - } +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// columnChecker checks if the column exists in the given table. 
+func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + adhocplan.Table: adhocplan.ValidColumn, + agentstatus.Table: agentstatus.ValidColumn, + agenttask.Table: agenttask.ValidColumn, + ansible.Table: ansible.ValidColumn, + authuser.Table: authuser.ValidColumn, + build.Table: build.ValidColumn, + buildcommit.Table: buildcommit.ValidColumn, + command.Table: command.ValidColumn, + competition.Table: competition.ValidColumn, + dns.Table: dns.ValidColumn, + dnsrecord.Table: dnsrecord.ValidColumn, + disk.Table: disk.ValidColumn, + environment.Table: environment.ValidColumn, + filedelete.Table: filedelete.ValidColumn, + filedownload.Table: filedownload.ValidColumn, + fileextract.Table: fileextract.ValidColumn, + finding.Table: finding.ValidColumn, + ginfilemiddleware.Table: ginfilemiddleware.ValidColumn, + host.Table: host.ValidColumn, + hostdependency.Table: hostdependency.ValidColumn, + identity.Table: identity.ValidColumn, + includednetwork.Table: includednetwork.ValidColumn, + network.Table: network.ValidColumn, + plan.Table: plan.ValidColumn, + plandiff.Table: plandiff.ValidColumn, + provisionedhost.Table: provisionedhost.ValidColumn, + provisionednetwork.Table: provisionednetwork.ValidColumn, + provisioningstep.Table: provisioningstep.ValidColumn, + repocommit.Table: repocommit.ValidColumn, + repository.Table: repository.ValidColumn, + script.Table: script.ValidColumn, + servertask.Table: servertask.ValidColumn, + status.Table: status.ValidColumn, + tag.Table: tag.ValidColumn, + team.Table: team.ValidColumn, + token.Table: token.ValidColumn, + user.Table: user.ValidColumn, + }) + }) + return columnCheck(table, column) } // Asc applies the given fields in ASC order. -func Asc(fields ...string) OrderFunc { +func Asc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Asc(s.C(f))) @@ -131,11 +164,10 @@ func Asc(fields ...string) OrderFunc { } // Desc applies the given fields in DESC order. -func Desc(fields ...string) OrderFunc { +func Desc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Desc(s.C(f))) @@ -151,7 +183,6 @@ type AggregateFunc func(*sql.Selector) string // GroupBy(field1, field2). // Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). // Scan(ctx, &v) -// func As(fn AggregateFunc, end string) AggregateFunc { return func(s *sql.Selector) string { return sql.As(fn(s), end) @@ -168,8 +199,7 @@ func Count() AggregateFunc { // Max applies the "max" aggregation function on the given field of each group. func Max(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -180,8 +210,7 @@ func Max(field string) AggregateFunc { // Mean applies the "mean" aggregation function on the given field of each group. 
func Mean(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -192,8 +221,7 @@ func Mean(field string) AggregateFunc { // Min applies the "min" aggregation function on the given field of each group. func Min(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -204,8 +232,7 @@ func Min(field string) AggregateFunc { // Sum applies the "sum" aggregation function on the given field of each group. func Sum(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -329,3 +356,325 @@ func IsConstraintError(err error) bool { var e *ConstraintError return errors.As(err, &e) } + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. 
+func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. 
+ *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. +func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/ent/enttest/enttest.go b/ent/enttest/enttest.go index a63d4038..f6604fb1 100755 --- a/ent/enttest/enttest.go +++ b/ent/enttest/enttest.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package enttest @@ -10,6 +10,7 @@ import ( _ "github.com/gen0cide/laforge/ent/runtime" "entgo.io/ent/dialect/sql/schema" + "github.com/gen0cide/laforge/ent/migrate" ) type ( @@ -17,7 +18,7 @@ type ( // testing.T and testing.B and used by enttest. TestingT interface { FailNow() - Error(...interface{}) + Error(...any) } // Option configures client creation. 
@@ -59,10 +60,7 @@ func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Cl t.Error(err) t.FailNow() } - if err := c.Schema.Create(context.Background(), o.migrateOpts...); err != nil { - t.Error(err) - t.FailNow() - } + migrateSchema(t, c, o) return c } @@ -70,9 +68,17 @@ func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Cl func NewClient(t TestingT, opts ...Option) *ent.Client { o := newOptions(opts) c := ent.NewClient(o.opts...) - if err := c.Schema.Create(context.Background(), o.migrateOpts...); err != nil { + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { t.Error(err) t.FailNow() } - return c } diff --git a/ent/entviz.go b/ent/entviz.go index be82b3f2..25b58d50 100755 --- a/ent/entviz.go +++ b/ent/entviz.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent diff --git a/ent/environment.go b/ent/environment.go index 08b47ed9..c050038a 100755 --- a/ent/environment.go +++ b/ent/environment.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/google/uuid" @@ -17,8 +18,8 @@ type Environment struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // CompetitionID holds the value of the "competition_id" field. CompetitionID string `json:"competition_id,omitempty" hcl:"competition_id,attr"` // Name holds the value of the "name" field. @@ -43,6 +44,7 @@ type Environment struct { // The values are being populated by the EnvironmentQuery when eager-loading is set. Edges EnvironmentEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // EnvironmentToUser holds the value of the EnvironmentToUser edge. HCLEnvironmentToUser []*User `json:"EnvironmentToUser,omitempty" hcl:"maintainer,block"` @@ -82,8 +84,8 @@ type Environment struct { HCLEnvironmentToRepository []*Repository `json:"EnvironmentToRepository,omitempty"` // EnvironmentToServerTask holds the value of the EnvironmentToServerTask edge. HCLEnvironmentToServerTask []*ServerTask `json:"EnvironmentToServerTask,omitempty"` - // - + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ + selectValues sql.SelectValues } // EnvironmentEdges holds the relations/edges for other nodes in the graph. @@ -129,6 +131,28 @@ type EnvironmentEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [19]bool + // totalCount holds the count of the edges above. 
+ totalCount [19]map[string]int + + namedEnvironmentToUser map[string][]*User + namedEnvironmentToHost map[string][]*Host + namedEnvironmentToCompetition map[string][]*Competition + namedEnvironmentToIdentity map[string][]*Identity + namedEnvironmentToCommand map[string][]*Command + namedEnvironmentToScript map[string][]*Script + namedEnvironmentToFileDownload map[string][]*FileDownload + namedEnvironmentToFileDelete map[string][]*FileDelete + namedEnvironmentToFileExtract map[string][]*FileExtract + namedEnvironmentToIncludedNetwork map[string][]*IncludedNetwork + namedEnvironmentToFinding map[string][]*Finding + namedEnvironmentToDNSRecord map[string][]*DNSRecord + namedEnvironmentToDNS map[string][]*DNS + namedEnvironmentToNetwork map[string][]*Network + namedEnvironmentToHostDependency map[string][]*HostDependency + namedEnvironmentToAnsible map[string][]*Ansible + namedEnvironmentToBuild map[string][]*Build + namedEnvironmentToRepository map[string][]*Repository + namedEnvironmentToServerTask map[string][]*ServerTask } // EnvironmentToUserOrErr returns the EnvironmentToUser value or an error if the edge @@ -303,20 +327,20 @@ func (e EnvironmentEdges) EnvironmentToServerTaskOrErr() ([]*ServerTask, error) } // scanValues returns the types for scanning values from sql.Rows. -func (*Environment) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Environment) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case environment.FieldAdminCidrs, environment.FieldExposedVdiPorts, environment.FieldConfig, environment.FieldTags: values[i] = new([]byte) case environment.FieldTeamCount, environment.FieldRevision: values[i] = new(sql.NullInt64) - case environment.FieldHclID, environment.FieldCompetitionID, environment.FieldName, environment.FieldDescription, environment.FieldBuilder: + case environment.FieldHCLID, environment.FieldCompetitionID, environment.FieldName, environment.FieldDescription, environment.FieldBuilder: values[i] = new(sql.NullString) case environment.FieldID: values[i] = new(uuid.UUID) default: - return nil, fmt.Errorf("unexpected column %q for type Environment", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -324,7 +348,7 @@ func (*Environment) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Environment fields. 
-func (e *Environment) assignValues(columns []string, values []interface{}) error { +func (e *Environment) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -336,11 +360,11 @@ func (e *Environment) assignValues(columns []string, values []interface{}) error } else if value != nil { e.ID = *value } - case environment.FieldHclID: + case environment.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - e.HclID = value.String + e.HCLID = value.String } case environment.FieldCompetitionID: if value, ok := values[i].(*sql.NullString); !ok { @@ -410,121 +434,129 @@ func (e *Environment) assignValues(columns []string, values []interface{}) error return fmt.Errorf("unmarshal field tags: %w", err) } } + default: + e.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Environment. +// This includes values selected through modifiers, order, etc. +func (e *Environment) Value(name string) (ent.Value, error) { + return e.selectValues.Get(name) +} + // QueryEnvironmentToUser queries the "EnvironmentToUser" edge of the Environment entity. func (e *Environment) QueryEnvironmentToUser() *UserQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToUser(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToUser(e) } // QueryEnvironmentToHost queries the "EnvironmentToHost" edge of the Environment entity. func (e *Environment) QueryEnvironmentToHost() *HostQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToHost(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToHost(e) } // QueryEnvironmentToCompetition queries the "EnvironmentToCompetition" edge of the Environment entity. func (e *Environment) QueryEnvironmentToCompetition() *CompetitionQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToCompetition(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToCompetition(e) } // QueryEnvironmentToIdentity queries the "EnvironmentToIdentity" edge of the Environment entity. func (e *Environment) QueryEnvironmentToIdentity() *IdentityQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToIdentity(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToIdentity(e) } // QueryEnvironmentToCommand queries the "EnvironmentToCommand" edge of the Environment entity. func (e *Environment) QueryEnvironmentToCommand() *CommandQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToCommand(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToCommand(e) } // QueryEnvironmentToScript queries the "EnvironmentToScript" edge of the Environment entity. func (e *Environment) QueryEnvironmentToScript() *ScriptQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToScript(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToScript(e) } // QueryEnvironmentToFileDownload queries the "EnvironmentToFileDownload" edge of the Environment entity. func (e *Environment) QueryEnvironmentToFileDownload() *FileDownloadQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToFileDownload(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToFileDownload(e) } // QueryEnvironmentToFileDelete queries the "EnvironmentToFileDelete" edge of the Environment entity. 
func (e *Environment) QueryEnvironmentToFileDelete() *FileDeleteQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToFileDelete(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToFileDelete(e) } // QueryEnvironmentToFileExtract queries the "EnvironmentToFileExtract" edge of the Environment entity. func (e *Environment) QueryEnvironmentToFileExtract() *FileExtractQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToFileExtract(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToFileExtract(e) } // QueryEnvironmentToIncludedNetwork queries the "EnvironmentToIncludedNetwork" edge of the Environment entity. func (e *Environment) QueryEnvironmentToIncludedNetwork() *IncludedNetworkQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToIncludedNetwork(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToIncludedNetwork(e) } // QueryEnvironmentToFinding queries the "EnvironmentToFinding" edge of the Environment entity. func (e *Environment) QueryEnvironmentToFinding() *FindingQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToFinding(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToFinding(e) } // QueryEnvironmentToDNSRecord queries the "EnvironmentToDNSRecord" edge of the Environment entity. func (e *Environment) QueryEnvironmentToDNSRecord() *DNSRecordQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToDNSRecord(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToDNSRecord(e) } // QueryEnvironmentToDNS queries the "EnvironmentToDNS" edge of the Environment entity. func (e *Environment) QueryEnvironmentToDNS() *DNSQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToDNS(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToDNS(e) } // QueryEnvironmentToNetwork queries the "EnvironmentToNetwork" edge of the Environment entity. func (e *Environment) QueryEnvironmentToNetwork() *NetworkQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToNetwork(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToNetwork(e) } // QueryEnvironmentToHostDependency queries the "EnvironmentToHostDependency" edge of the Environment entity. func (e *Environment) QueryEnvironmentToHostDependency() *HostDependencyQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToHostDependency(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToHostDependency(e) } // QueryEnvironmentToAnsible queries the "EnvironmentToAnsible" edge of the Environment entity. func (e *Environment) QueryEnvironmentToAnsible() *AnsibleQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToAnsible(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToAnsible(e) } // QueryEnvironmentToBuild queries the "EnvironmentToBuild" edge of the Environment entity. func (e *Environment) QueryEnvironmentToBuild() *BuildQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToBuild(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToBuild(e) } // QueryEnvironmentToRepository queries the "EnvironmentToRepository" edge of the Environment entity. func (e *Environment) QueryEnvironmentToRepository() *RepositoryQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToRepository(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToRepository(e) } // QueryEnvironmentToServerTask queries the "EnvironmentToServerTask" edge of the Environment entity. 
func (e *Environment) QueryEnvironmentToServerTask() *ServerTaskQuery { - return (&EnvironmentClient{config: e.config}).QueryEnvironmentToServerTask(e) + return NewEnvironmentClient(e.config).QueryEnvironmentToServerTask(e) } // Update returns a builder for updating this Environment. // Note that you need to call Environment.Unwrap() before calling this method if this Environment // was returned from a transaction, and the transaction was committed or rolled back. func (e *Environment) Update() *EnvironmentUpdateOne { - return (&EnvironmentClient{config: e.config}).UpdateOne(e) + return NewEnvironmentClient(e.config).UpdateOne(e) } // Unwrap unwraps the Environment entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (e *Environment) Unwrap() *Environment { - tx, ok := e.config.driver.(*txDriver) + _tx, ok := e.config.driver.(*txDriver) if !ok { panic("ent: Environment is not a transactional entity") } - e.config.driver = tx.drv + e.config.driver = _tx.drv return e } @@ -532,38 +564,498 @@ func (e *Environment) Unwrap() *Environment { func (e *Environment) String() string { var builder strings.Builder builder.WriteString("Environment(") - builder.WriteString(fmt.Sprintf("id=%v", e.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(e.HclID) - builder.WriteString(", competition_id=") + builder.WriteString(fmt.Sprintf("id=%v, ", e.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(e.HCLID) + builder.WriteString(", ") + builder.WriteString("competition_id=") builder.WriteString(e.CompetitionID) - builder.WriteString(", name=") + builder.WriteString(", ") + builder.WriteString("name=") builder.WriteString(e.Name) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(e.Description) - builder.WriteString(", builder=") + builder.WriteString(", ") + builder.WriteString("builder=") builder.WriteString(e.Builder) - builder.WriteString(", team_count=") + builder.WriteString(", ") + builder.WriteString("team_count=") builder.WriteString(fmt.Sprintf("%v", e.TeamCount)) - builder.WriteString(", revision=") + builder.WriteString(", ") + builder.WriteString("revision=") builder.WriteString(fmt.Sprintf("%v", e.Revision)) - builder.WriteString(", admin_cidrs=") + builder.WriteString(", ") + builder.WriteString("admin_cidrs=") builder.WriteString(fmt.Sprintf("%v", e.AdminCidrs)) - builder.WriteString(", exposed_vdi_ports=") + builder.WriteString(", ") + builder.WriteString("exposed_vdi_ports=") builder.WriteString(fmt.Sprintf("%v", e.ExposedVdiPorts)) - builder.WriteString(", config=") + builder.WriteString(", ") + builder.WriteString("config=") builder.WriteString(fmt.Sprintf("%v", e.Config)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", e.Tags)) builder.WriteByte(')') return builder.String() } -// Environments is a parsable slice of Environment. -type Environments []*Environment +// NamedEnvironmentToUser returns the EnvironmentToUser named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (e *Environment) NamedEnvironmentToUser(name string) ([]*User, error) { + if e.Edges.namedEnvironmentToUser == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToUser[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (e Environments) config(cfg config) { - for _i := range e { - e[_i].config = cfg +func (e *Environment) appendNamedEnvironmentToUser(name string, edges ...*User) { + if e.Edges.namedEnvironmentToUser == nil { + e.Edges.namedEnvironmentToUser = make(map[string][]*User) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToUser[name] = []*User{} + } else { + e.Edges.namedEnvironmentToUser[name] = append(e.Edges.namedEnvironmentToUser[name], edges...) + } +} + +// NamedEnvironmentToHost returns the EnvironmentToHost named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToHost(name string) ([]*Host, error) { + if e.Edges.namedEnvironmentToHost == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToHost[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToHost(name string, edges ...*Host) { + if e.Edges.namedEnvironmentToHost == nil { + e.Edges.namedEnvironmentToHost = make(map[string][]*Host) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToHost[name] = []*Host{} + } else { + e.Edges.namedEnvironmentToHost[name] = append(e.Edges.namedEnvironmentToHost[name], edges...) + } +} + +// NamedEnvironmentToCompetition returns the EnvironmentToCompetition named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToCompetition(name string) ([]*Competition, error) { + if e.Edges.namedEnvironmentToCompetition == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToCompetition[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToCompetition(name string, edges ...*Competition) { + if e.Edges.namedEnvironmentToCompetition == nil { + e.Edges.namedEnvironmentToCompetition = make(map[string][]*Competition) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToCompetition[name] = []*Competition{} + } else { + e.Edges.namedEnvironmentToCompetition[name] = append(e.Edges.namedEnvironmentToCompetition[name], edges...) + } +} + +// NamedEnvironmentToIdentity returns the EnvironmentToIdentity named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToIdentity(name string) ([]*Identity, error) { + if e.Edges.namedEnvironmentToIdentity == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToIdentity[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToIdentity(name string, edges ...*Identity) { + if e.Edges.namedEnvironmentToIdentity == nil { + e.Edges.namedEnvironmentToIdentity = make(map[string][]*Identity) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToIdentity[name] = []*Identity{} + } else { + e.Edges.namedEnvironmentToIdentity[name] = append(e.Edges.namedEnvironmentToIdentity[name], edges...) 
+ } +} + +// NamedEnvironmentToCommand returns the EnvironmentToCommand named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToCommand(name string) ([]*Command, error) { + if e.Edges.namedEnvironmentToCommand == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToCommand[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToCommand(name string, edges ...*Command) { + if e.Edges.namedEnvironmentToCommand == nil { + e.Edges.namedEnvironmentToCommand = make(map[string][]*Command) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToCommand[name] = []*Command{} + } else { + e.Edges.namedEnvironmentToCommand[name] = append(e.Edges.namedEnvironmentToCommand[name], edges...) + } +} + +// NamedEnvironmentToScript returns the EnvironmentToScript named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToScript(name string) ([]*Script, error) { + if e.Edges.namedEnvironmentToScript == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToScript[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToScript(name string, edges ...*Script) { + if e.Edges.namedEnvironmentToScript == nil { + e.Edges.namedEnvironmentToScript = make(map[string][]*Script) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToScript[name] = []*Script{} + } else { + e.Edges.namedEnvironmentToScript[name] = append(e.Edges.namedEnvironmentToScript[name], edges...) + } +} + +// NamedEnvironmentToFileDownload returns the EnvironmentToFileDownload named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToFileDownload(name string) ([]*FileDownload, error) { + if e.Edges.namedEnvironmentToFileDownload == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToFileDownload[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToFileDownload(name string, edges ...*FileDownload) { + if e.Edges.namedEnvironmentToFileDownload == nil { + e.Edges.namedEnvironmentToFileDownload = make(map[string][]*FileDownload) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToFileDownload[name] = []*FileDownload{} + } else { + e.Edges.namedEnvironmentToFileDownload[name] = append(e.Edges.namedEnvironmentToFileDownload[name], edges...) + } +} + +// NamedEnvironmentToFileDelete returns the EnvironmentToFileDelete named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (e *Environment) NamedEnvironmentToFileDelete(name string) ([]*FileDelete, error) { + if e.Edges.namedEnvironmentToFileDelete == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToFileDelete[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToFileDelete(name string, edges ...*FileDelete) { + if e.Edges.namedEnvironmentToFileDelete == nil { + e.Edges.namedEnvironmentToFileDelete = make(map[string][]*FileDelete) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToFileDelete[name] = []*FileDelete{} + } else { + e.Edges.namedEnvironmentToFileDelete[name] = append(e.Edges.namedEnvironmentToFileDelete[name], edges...) + } +} + +// NamedEnvironmentToFileExtract returns the EnvironmentToFileExtract named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToFileExtract(name string) ([]*FileExtract, error) { + if e.Edges.namedEnvironmentToFileExtract == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToFileExtract[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToFileExtract(name string, edges ...*FileExtract) { + if e.Edges.namedEnvironmentToFileExtract == nil { + e.Edges.namedEnvironmentToFileExtract = make(map[string][]*FileExtract) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToFileExtract[name] = []*FileExtract{} + } else { + e.Edges.namedEnvironmentToFileExtract[name] = append(e.Edges.namedEnvironmentToFileExtract[name], edges...) + } +} + +// NamedEnvironmentToIncludedNetwork returns the EnvironmentToIncludedNetwork named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToIncludedNetwork(name string) ([]*IncludedNetwork, error) { + if e.Edges.namedEnvironmentToIncludedNetwork == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToIncludedNetwork[name] + if !ok { + return nil, &NotLoadedError{edge: name} } + return nodes, nil } + +func (e *Environment) appendNamedEnvironmentToIncludedNetwork(name string, edges ...*IncludedNetwork) { + if e.Edges.namedEnvironmentToIncludedNetwork == nil { + e.Edges.namedEnvironmentToIncludedNetwork = make(map[string][]*IncludedNetwork) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToIncludedNetwork[name] = []*IncludedNetwork{} + } else { + e.Edges.namedEnvironmentToIncludedNetwork[name] = append(e.Edges.namedEnvironmentToIncludedNetwork[name], edges...) + } +} + +// NamedEnvironmentToFinding returns the EnvironmentToFinding named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (e *Environment) NamedEnvironmentToFinding(name string) ([]*Finding, error) { + if e.Edges.namedEnvironmentToFinding == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToFinding[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToFinding(name string, edges ...*Finding) { + if e.Edges.namedEnvironmentToFinding == nil { + e.Edges.namedEnvironmentToFinding = make(map[string][]*Finding) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToFinding[name] = []*Finding{} + } else { + e.Edges.namedEnvironmentToFinding[name] = append(e.Edges.namedEnvironmentToFinding[name], edges...) + } +} + +// NamedEnvironmentToDNSRecord returns the EnvironmentToDNSRecord named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToDNSRecord(name string) ([]*DNSRecord, error) { + if e.Edges.namedEnvironmentToDNSRecord == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToDNSRecord[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToDNSRecord(name string, edges ...*DNSRecord) { + if e.Edges.namedEnvironmentToDNSRecord == nil { + e.Edges.namedEnvironmentToDNSRecord = make(map[string][]*DNSRecord) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToDNSRecord[name] = []*DNSRecord{} + } else { + e.Edges.namedEnvironmentToDNSRecord[name] = append(e.Edges.namedEnvironmentToDNSRecord[name], edges...) + } +} + +// NamedEnvironmentToDNS returns the EnvironmentToDNS named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToDNS(name string) ([]*DNS, error) { + if e.Edges.namedEnvironmentToDNS == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToDNS[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToDNS(name string, edges ...*DNS) { + if e.Edges.namedEnvironmentToDNS == nil { + e.Edges.namedEnvironmentToDNS = make(map[string][]*DNS) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToDNS[name] = []*DNS{} + } else { + e.Edges.namedEnvironmentToDNS[name] = append(e.Edges.namedEnvironmentToDNS[name], edges...) + } +} + +// NamedEnvironmentToNetwork returns the EnvironmentToNetwork named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToNetwork(name string) ([]*Network, error) { + if e.Edges.namedEnvironmentToNetwork == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToNetwork[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToNetwork(name string, edges ...*Network) { + if e.Edges.namedEnvironmentToNetwork == nil { + e.Edges.namedEnvironmentToNetwork = make(map[string][]*Network) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToNetwork[name] = []*Network{} + } else { + e.Edges.namedEnvironmentToNetwork[name] = append(e.Edges.namedEnvironmentToNetwork[name], edges...) + } +} + +// NamedEnvironmentToHostDependency returns the EnvironmentToHostDependency named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (e *Environment) NamedEnvironmentToHostDependency(name string) ([]*HostDependency, error) { + if e.Edges.namedEnvironmentToHostDependency == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToHostDependency[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToHostDependency(name string, edges ...*HostDependency) { + if e.Edges.namedEnvironmentToHostDependency == nil { + e.Edges.namedEnvironmentToHostDependency = make(map[string][]*HostDependency) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToHostDependency[name] = []*HostDependency{} + } else { + e.Edges.namedEnvironmentToHostDependency[name] = append(e.Edges.namedEnvironmentToHostDependency[name], edges...) + } +} + +// NamedEnvironmentToAnsible returns the EnvironmentToAnsible named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToAnsible(name string) ([]*Ansible, error) { + if e.Edges.namedEnvironmentToAnsible == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToAnsible[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToAnsible(name string, edges ...*Ansible) { + if e.Edges.namedEnvironmentToAnsible == nil { + e.Edges.namedEnvironmentToAnsible = make(map[string][]*Ansible) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToAnsible[name] = []*Ansible{} + } else { + e.Edges.namedEnvironmentToAnsible[name] = append(e.Edges.namedEnvironmentToAnsible[name], edges...) + } +} + +// NamedEnvironmentToBuild returns the EnvironmentToBuild named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToBuild(name string) ([]*Build, error) { + if e.Edges.namedEnvironmentToBuild == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToBuild[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToBuild(name string, edges ...*Build) { + if e.Edges.namedEnvironmentToBuild == nil { + e.Edges.namedEnvironmentToBuild = make(map[string][]*Build) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToBuild[name] = []*Build{} + } else { + e.Edges.namedEnvironmentToBuild[name] = append(e.Edges.namedEnvironmentToBuild[name], edges...) + } +} + +// NamedEnvironmentToRepository returns the EnvironmentToRepository named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToRepository(name string) ([]*Repository, error) { + if e.Edges.namedEnvironmentToRepository == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToRepository[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToRepository(name string, edges ...*Repository) { + if e.Edges.namedEnvironmentToRepository == nil { + e.Edges.namedEnvironmentToRepository = make(map[string][]*Repository) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToRepository[name] = []*Repository{} + } else { + e.Edges.namedEnvironmentToRepository[name] = append(e.Edges.namedEnvironmentToRepository[name], edges...) 
+ } +} + +// NamedEnvironmentToServerTask returns the EnvironmentToServerTask named value or an error if the edge was not +// loaded in eager-loading with this name. +func (e *Environment) NamedEnvironmentToServerTask(name string) ([]*ServerTask, error) { + if e.Edges.namedEnvironmentToServerTask == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := e.Edges.namedEnvironmentToServerTask[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (e *Environment) appendNamedEnvironmentToServerTask(name string, edges ...*ServerTask) { + if e.Edges.namedEnvironmentToServerTask == nil { + e.Edges.namedEnvironmentToServerTask = make(map[string][]*ServerTask) + } + if len(edges) == 0 { + e.Edges.namedEnvironmentToServerTask[name] = []*ServerTask{} + } else { + e.Edges.namedEnvironmentToServerTask[name] = append(e.Edges.namedEnvironmentToServerTask[name], edges...) + } +} + +// Environments is a parsable slice of Environment. +type Environments []*Environment diff --git a/ent/environment/environment.go b/ent/environment/environment.go index 6761cb8d..d68cc9c1 100755 --- a/ent/environment/environment.go +++ b/ent/environment/environment.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package environment import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "environment" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldCompetitionID holds the string denoting the competition_id field in the database. FieldCompetitionID = "competition_id" // FieldName holds the string denoting the name field in the database. @@ -203,7 +205,7 @@ const ( // Columns holds all SQL columns for environment fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldCompetitionID, FieldName, FieldDescription, @@ -245,3 +247,445 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Environment queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByCompetitionID orders the results by the competition_id field. +func ByCompetitionID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCompetitionID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByBuilder orders the results by the builder field. +func ByBuilder(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBuilder, opts...).ToFunc() +} + +// ByTeamCount orders the results by the team_count field. 
+func ByTeamCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTeamCount, opts...).ToFunc() +} + +// ByRevision orders the results by the revision field. +func ByRevision(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevision, opts...).ToFunc() +} + +// ByEnvironmentToUserCount orders the results by EnvironmentToUser count. +func ByEnvironmentToUserCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToUserStep(), opts...) + } +} + +// ByEnvironmentToUser orders the results by EnvironmentToUser terms. +func ByEnvironmentToUser(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToUserStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToHostCount orders the results by EnvironmentToHost count. +func ByEnvironmentToHostCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToHostStep(), opts...) + } +} + +// ByEnvironmentToHost orders the results by EnvironmentToHost terms. +func ByEnvironmentToHost(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToHostStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToCompetitionCount orders the results by EnvironmentToCompetition count. +func ByEnvironmentToCompetitionCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToCompetitionStep(), opts...) + } +} + +// ByEnvironmentToCompetition orders the results by EnvironmentToCompetition terms. +func ByEnvironmentToCompetition(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToCompetitionStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToIdentityCount orders the results by EnvironmentToIdentity count. +func ByEnvironmentToIdentityCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToIdentityStep(), opts...) + } +} + +// ByEnvironmentToIdentity orders the results by EnvironmentToIdentity terms. +func ByEnvironmentToIdentity(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToIdentityStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToCommandCount orders the results by EnvironmentToCommand count. +func ByEnvironmentToCommandCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToCommandStep(), opts...) + } +} + +// ByEnvironmentToCommand orders the results by EnvironmentToCommand terms. +func ByEnvironmentToCommand(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToCommandStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToScriptCount orders the results by EnvironmentToScript count. +func ByEnvironmentToScriptCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToScriptStep(), opts...) 
+ } +} + +// ByEnvironmentToScript orders the results by EnvironmentToScript terms. +func ByEnvironmentToScript(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToScriptStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToFileDownloadCount orders the results by EnvironmentToFileDownload count. +func ByEnvironmentToFileDownloadCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToFileDownloadStep(), opts...) + } +} + +// ByEnvironmentToFileDownload orders the results by EnvironmentToFileDownload terms. +func ByEnvironmentToFileDownload(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToFileDownloadStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToFileDeleteCount orders the results by EnvironmentToFileDelete count. +func ByEnvironmentToFileDeleteCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToFileDeleteStep(), opts...) + } +} + +// ByEnvironmentToFileDelete orders the results by EnvironmentToFileDelete terms. +func ByEnvironmentToFileDelete(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToFileDeleteStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToFileExtractCount orders the results by EnvironmentToFileExtract count. +func ByEnvironmentToFileExtractCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToFileExtractStep(), opts...) + } +} + +// ByEnvironmentToFileExtract orders the results by EnvironmentToFileExtract terms. +func ByEnvironmentToFileExtract(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToFileExtractStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToIncludedNetworkCount orders the results by EnvironmentToIncludedNetwork count. +func ByEnvironmentToIncludedNetworkCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToIncludedNetworkStep(), opts...) + } +} + +// ByEnvironmentToIncludedNetwork orders the results by EnvironmentToIncludedNetwork terms. +func ByEnvironmentToIncludedNetwork(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToIncludedNetworkStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToFindingCount orders the results by EnvironmentToFinding count. +func ByEnvironmentToFindingCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToFindingStep(), opts...) + } +} + +// ByEnvironmentToFinding orders the results by EnvironmentToFinding terms. +func ByEnvironmentToFinding(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToFindingStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToDNSRecordCount orders the results by EnvironmentToDNSRecord count. 
+func ByEnvironmentToDNSRecordCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToDNSRecordStep(), opts...) + } +} + +// ByEnvironmentToDNSRecord orders the results by EnvironmentToDNSRecord terms. +func ByEnvironmentToDNSRecord(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToDNSRecordStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToDNSCount orders the results by EnvironmentToDNS count. +func ByEnvironmentToDNSCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToDNSStep(), opts...) + } +} + +// ByEnvironmentToDNS orders the results by EnvironmentToDNS terms. +func ByEnvironmentToDNS(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToDNSStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToNetworkCount orders the results by EnvironmentToNetwork count. +func ByEnvironmentToNetworkCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToNetworkStep(), opts...) + } +} + +// ByEnvironmentToNetwork orders the results by EnvironmentToNetwork terms. +func ByEnvironmentToNetwork(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToNetworkStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToHostDependencyCount orders the results by EnvironmentToHostDependency count. +func ByEnvironmentToHostDependencyCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToHostDependencyStep(), opts...) + } +} + +// ByEnvironmentToHostDependency orders the results by EnvironmentToHostDependency terms. +func ByEnvironmentToHostDependency(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToHostDependencyStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToAnsibleCount orders the results by EnvironmentToAnsible count. +func ByEnvironmentToAnsibleCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToAnsibleStep(), opts...) + } +} + +// ByEnvironmentToAnsible orders the results by EnvironmentToAnsible terms. +func ByEnvironmentToAnsible(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToAnsibleStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToBuildCount orders the results by EnvironmentToBuild count. +func ByEnvironmentToBuildCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToBuildStep(), opts...) + } +} + +// ByEnvironmentToBuild orders the results by EnvironmentToBuild terms. +func ByEnvironmentToBuild(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToBuildStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} + +// ByEnvironmentToRepositoryCount orders the results by EnvironmentToRepository count. +func ByEnvironmentToRepositoryCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToRepositoryStep(), opts...) + } +} + +// ByEnvironmentToRepository orders the results by EnvironmentToRepository terms. +func ByEnvironmentToRepository(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToRepositoryStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEnvironmentToServerTaskCount orders the results by EnvironmentToServerTask count. +func ByEnvironmentToServerTaskCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEnvironmentToServerTaskStep(), opts...) + } +} + +// ByEnvironmentToServerTask orders the results by EnvironmentToServerTask terms. +func ByEnvironmentToServerTask(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEnvironmentToServerTaskStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newEnvironmentToUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToUserTable, EnvironmentToUserPrimaryKey...), + ) +} +func newEnvironmentToHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToHostTable, EnvironmentToHostColumn), + ) +} +func newEnvironmentToCompetitionStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToCompetitionInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToCompetitionTable, EnvironmentToCompetitionColumn), + ) +} +func newEnvironmentToIdentityStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToIdentityInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToIdentityTable, EnvironmentToIdentityColumn), + ) +} +func newEnvironmentToCommandStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToCommandInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToCommandTable, EnvironmentToCommandColumn), + ) +} +func newEnvironmentToScriptStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToScriptInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToScriptTable, EnvironmentToScriptColumn), + ) +} +func newEnvironmentToFileDownloadStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToFileDownloadInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileDownloadTable, EnvironmentToFileDownloadColumn), + ) +} +func newEnvironmentToFileDeleteStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToFileDeleteInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileDeleteTable, EnvironmentToFileDeleteColumn), + ) +} +func newEnvironmentToFileExtractStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, 
FieldID), + sqlgraph.To(EnvironmentToFileExtractInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileExtractTable, EnvironmentToFileExtractColumn), + ) +} +func newEnvironmentToIncludedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToIncludedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToIncludedNetworkTable, EnvironmentToIncludedNetworkPrimaryKey...), + ) +} +func newEnvironmentToFindingStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToFindingInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFindingTable, EnvironmentToFindingColumn), + ) +} +func newEnvironmentToDNSRecordStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToDNSRecordInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToDNSRecordTable, EnvironmentToDNSRecordColumn), + ) +} +func newEnvironmentToDNSStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToDNSInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToDNSTable, EnvironmentToDNSPrimaryKey...), + ) +} +func newEnvironmentToNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToNetworkTable, EnvironmentToNetworkColumn), + ) +} +func newEnvironmentToHostDependencyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToHostDependencyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToHostDependencyTable, EnvironmentToHostDependencyColumn), + ) +} +func newEnvironmentToAnsibleStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToAnsibleInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToAnsibleTable, EnvironmentToAnsibleColumn), + ) +} +func newEnvironmentToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, EnvironmentToBuildTable, EnvironmentToBuildColumn), + ) +} +func newEnvironmentToRepositoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToRepositoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, EnvironmentToRepositoryTable, EnvironmentToRepositoryPrimaryKey...), + ) +} +func newEnvironmentToServerTaskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EnvironmentToServerTaskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, EnvironmentToServerTaskTable, EnvironmentToServerTaskColumn), + ) +} diff --git a/ent/environment/where.go b/ent/environment/where.go index 8d453a29..046cc55f 100755 --- a/ent/environment/where.go +++ b/ent/environment/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package environment @@ -11,841 +11,487 @@ import ( // ID filters vertices based on their ID field. 
func ID(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Environment(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Environment { + return predicate.Environment(sql.FieldEQ(FieldHCLID, v)) } // CompetitionID applies equality check predicate on the "competition_id" field. It's identical to CompetitionIDEQ. 
func CompetitionID(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldCompetitionID, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldName, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. func Description(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldDescription, v)) } // Builder applies equality check predicate on the "builder" field. It's identical to BuilderEQ. func Builder(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldBuilder, v)) } // TeamCount applies equality check predicate on the "team_count" field. It's identical to TeamCountEQ. func TeamCount(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldTeamCount, v)) } // Revision applies equality check predicate on the "revision" field. It's identical to RevisionEQ. func Revision(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldRevision, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Environment { + return predicate.Environment(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Environment { + return predicate.Environment(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Environment { + return predicate.Environment(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". 
This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Environment { + return predicate.Environment(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Environment { + return predicate.Environment(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Environment { + return predicate.Environment(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Environment { + return predicate.Environment(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Environment { + return predicate.Environment(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Environment { + return predicate.Environment(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Environment { + return predicate.Environment(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Environment { + return predicate.Environment(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. 
+func HCLIDEqualFold(v string) predicate.Environment { + return predicate.Environment(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Environment { + return predicate.Environment(sql.FieldContainsFold(FieldHCLID, v)) } // CompetitionIDEQ applies the EQ predicate on the "competition_id" field. func CompetitionIDEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldCompetitionID, v)) } // CompetitionIDNEQ applies the NEQ predicate on the "competition_id" field. func CompetitionIDNEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldNEQ(FieldCompetitionID, v)) } // CompetitionIDIn applies the In predicate on the "competition_id" field. func CompetitionIDIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCompetitionID), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldCompetitionID, vs...)) } // CompetitionIDNotIn applies the NotIn predicate on the "competition_id" field. func CompetitionIDNotIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCompetitionID), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldCompetitionID, vs...)) } // CompetitionIDGT applies the GT predicate on the "competition_id" field. func CompetitionIDGT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldGT(FieldCompetitionID, v)) } // CompetitionIDGTE applies the GTE predicate on the "competition_id" field. func CompetitionIDGTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldGTE(FieldCompetitionID, v)) } // CompetitionIDLT applies the LT predicate on the "competition_id" field. func CompetitionIDLT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldLT(FieldCompetitionID, v)) } // CompetitionIDLTE applies the LTE predicate on the "competition_id" field. 
func CompetitionIDLTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldLTE(FieldCompetitionID, v)) } // CompetitionIDContains applies the Contains predicate on the "competition_id" field. func CompetitionIDContains(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldContains(FieldCompetitionID, v)) } // CompetitionIDHasPrefix applies the HasPrefix predicate on the "competition_id" field. func CompetitionIDHasPrefix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldHasPrefix(FieldCompetitionID, v)) } // CompetitionIDHasSuffix applies the HasSuffix predicate on the "competition_id" field. func CompetitionIDHasSuffix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldHasSuffix(FieldCompetitionID, v)) } // CompetitionIDEqualFold applies the EqualFold predicate on the "competition_id" field. func CompetitionIDEqualFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldEqualFold(FieldCompetitionID, v)) } // CompetitionIDContainsFold applies the ContainsFold predicate on the "competition_id" field. func CompetitionIDContainsFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldCompetitionID), v)) - }) + return predicate.Environment(sql.FieldContainsFold(FieldCompetitionID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Environment(sql.FieldContainsFold(FieldName, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. func DescriptionEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. 
func DescriptionIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. func DescriptionNotIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. func DescriptionHasSuffix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. 
func DescriptionEqualFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. func DescriptionContainsFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Environment(sql.FieldContainsFold(FieldDescription, v)) } // BuilderEQ applies the EQ predicate on the "builder" field. func BuilderEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldBuilder, v)) } // BuilderNEQ applies the NEQ predicate on the "builder" field. func BuilderNEQ(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldNEQ(FieldBuilder, v)) } // BuilderIn applies the In predicate on the "builder" field. func BuilderIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldBuilder), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldBuilder, vs...)) } // BuilderNotIn applies the NotIn predicate on the "builder" field. func BuilderNotIn(vs ...string) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldBuilder), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldBuilder, vs...)) } // BuilderGT applies the GT predicate on the "builder" field. func BuilderGT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldGT(FieldBuilder, v)) } // BuilderGTE applies the GTE predicate on the "builder" field. func BuilderGTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldGTE(FieldBuilder, v)) } // BuilderLT applies the LT predicate on the "builder" field. func BuilderLT(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldLT(FieldBuilder, v)) } // BuilderLTE applies the LTE predicate on the "builder" field. func BuilderLTE(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldLTE(FieldBuilder, v)) } // BuilderContains applies the Contains predicate on the "builder" field. 
func BuilderContains(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldContains(FieldBuilder, v)) } // BuilderHasPrefix applies the HasPrefix predicate on the "builder" field. func BuilderHasPrefix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldHasPrefix(FieldBuilder, v)) } // BuilderHasSuffix applies the HasSuffix predicate on the "builder" field. func BuilderHasSuffix(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldHasSuffix(FieldBuilder, v)) } // BuilderEqualFold applies the EqualFold predicate on the "builder" field. func BuilderEqualFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldEqualFold(FieldBuilder, v)) } // BuilderContainsFold applies the ContainsFold predicate on the "builder" field. func BuilderContainsFold(v string) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldBuilder), v)) - }) + return predicate.Environment(sql.FieldContainsFold(FieldBuilder, v)) } // TeamCountEQ applies the EQ predicate on the "team_count" field. func TeamCountEQ(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldTeamCount, v)) } // TeamCountNEQ applies the NEQ predicate on the "team_count" field. func TeamCountNEQ(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldNEQ(FieldTeamCount, v)) } // TeamCountIn applies the In predicate on the "team_count" field. func TeamCountIn(vs ...int) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTeamCount), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldTeamCount, vs...)) } // TeamCountNotIn applies the NotIn predicate on the "team_count" field. func TeamCountNotIn(vs ...int) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTeamCount), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldTeamCount, vs...)) } // TeamCountGT applies the GT predicate on the "team_count" field. func TeamCountGT(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldGT(FieldTeamCount, v)) } // TeamCountGTE applies the GTE predicate on the "team_count" field. 
func TeamCountGTE(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldGTE(FieldTeamCount, v)) } // TeamCountLT applies the LT predicate on the "team_count" field. func TeamCountLT(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldLT(FieldTeamCount, v)) } // TeamCountLTE applies the LTE predicate on the "team_count" field. func TeamCountLTE(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTeamCount), v)) - }) + return predicate.Environment(sql.FieldLTE(FieldTeamCount, v)) } // RevisionEQ applies the EQ predicate on the "revision" field. func RevisionEQ(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldEQ(FieldRevision, v)) } // RevisionNEQ applies the NEQ predicate on the "revision" field. func RevisionNEQ(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldNEQ(FieldRevision, v)) } // RevisionIn applies the In predicate on the "revision" field. func RevisionIn(vs ...int) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRevision), v...)) - }) + return predicate.Environment(sql.FieldIn(FieldRevision, vs...)) } // RevisionNotIn applies the NotIn predicate on the "revision" field. func RevisionNotIn(vs ...int) predicate.Environment { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Environment(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRevision), v...)) - }) + return predicate.Environment(sql.FieldNotIn(FieldRevision, vs...)) } // RevisionGT applies the GT predicate on the "revision" field. func RevisionGT(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldGT(FieldRevision, v)) } // RevisionGTE applies the GTE predicate on the "revision" field. func RevisionGTE(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldGTE(FieldRevision, v)) } // RevisionLT applies the LT predicate on the "revision" field. func RevisionLT(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldLT(FieldRevision, v)) } // RevisionLTE applies the LTE predicate on the "revision" field. 
func RevisionLTE(v int) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRevision), v)) - }) + return predicate.Environment(sql.FieldLTE(FieldRevision, v)) } // HasEnvironmentToUser applies the HasEdge predicate on the "EnvironmentToUser" edge. @@ -853,7 +499,6 @@ func HasEnvironmentToUser() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToUserTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToUserTable, EnvironmentToUserPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -863,11 +508,7 @@ func HasEnvironmentToUser() predicate.Environment { // HasEnvironmentToUserWith applies the HasEdge predicate on the "EnvironmentToUser" edge with a given conditions (other predicates). func HasEnvironmentToUserWith(preds ...predicate.User) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToUserTable, EnvironmentToUserPrimaryKey...), - ) + step := newEnvironmentToUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -881,7 +522,6 @@ func HasEnvironmentToHost() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToHostTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToHostTable, EnvironmentToHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -891,11 +531,7 @@ func HasEnvironmentToHost() predicate.Environment { // HasEnvironmentToHostWith applies the HasEdge predicate on the "EnvironmentToHost" edge with a given conditions (other predicates). func HasEnvironmentToHostWith(preds ...predicate.Host) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToHostTable, EnvironmentToHostColumn), - ) + step := newEnvironmentToHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -909,7 +545,6 @@ func HasEnvironmentToCompetition() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToCompetitionTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToCompetitionTable, EnvironmentToCompetitionColumn), ) sqlgraph.HasNeighbors(s, step) @@ -919,11 +554,7 @@ func HasEnvironmentToCompetition() predicate.Environment { // HasEnvironmentToCompetitionWith applies the HasEdge predicate on the "EnvironmentToCompetition" edge with a given conditions (other predicates). 
func HasEnvironmentToCompetitionWith(preds ...predicate.Competition) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToCompetitionInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToCompetitionTable, EnvironmentToCompetitionColumn), - ) + step := newEnvironmentToCompetitionStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -937,7 +568,6 @@ func HasEnvironmentToIdentity() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToIdentityTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToIdentityTable, EnvironmentToIdentityColumn), ) sqlgraph.HasNeighbors(s, step) @@ -947,11 +577,7 @@ func HasEnvironmentToIdentity() predicate.Environment { // HasEnvironmentToIdentityWith applies the HasEdge predicate on the "EnvironmentToIdentity" edge with a given conditions (other predicates). func HasEnvironmentToIdentityWith(preds ...predicate.Identity) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToIdentityInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToIdentityTable, EnvironmentToIdentityColumn), - ) + step := newEnvironmentToIdentityStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -965,7 +591,6 @@ func HasEnvironmentToCommand() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToCommandTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToCommandTable, EnvironmentToCommandColumn), ) sqlgraph.HasNeighbors(s, step) @@ -975,11 +600,7 @@ func HasEnvironmentToCommand() predicate.Environment { // HasEnvironmentToCommandWith applies the HasEdge predicate on the "EnvironmentToCommand" edge with a given conditions (other predicates). func HasEnvironmentToCommandWith(preds ...predicate.Command) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToCommandInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToCommandTable, EnvironmentToCommandColumn), - ) + step := newEnvironmentToCommandStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -993,7 +614,6 @@ func HasEnvironmentToScript() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToScriptTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToScriptTable, EnvironmentToScriptColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1003,11 +623,7 @@ func HasEnvironmentToScript() predicate.Environment { // HasEnvironmentToScriptWith applies the HasEdge predicate on the "EnvironmentToScript" edge with a given conditions (other predicates). 
func HasEnvironmentToScriptWith(preds ...predicate.Script) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToScriptInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToScriptTable, EnvironmentToScriptColumn), - ) + step := newEnvironmentToScriptStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1021,7 +637,6 @@ func HasEnvironmentToFileDownload() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFileDownloadTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileDownloadTable, EnvironmentToFileDownloadColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1031,11 +646,7 @@ func HasEnvironmentToFileDownload() predicate.Environment { // HasEnvironmentToFileDownloadWith applies the HasEdge predicate on the "EnvironmentToFileDownload" edge with a given conditions (other predicates). func HasEnvironmentToFileDownloadWith(preds ...predicate.FileDownload) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFileDownloadInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileDownloadTable, EnvironmentToFileDownloadColumn), - ) + step := newEnvironmentToFileDownloadStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1049,7 +660,6 @@ func HasEnvironmentToFileDelete() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFileDeleteTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileDeleteTable, EnvironmentToFileDeleteColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1059,11 +669,7 @@ func HasEnvironmentToFileDelete() predicate.Environment { // HasEnvironmentToFileDeleteWith applies the HasEdge predicate on the "EnvironmentToFileDelete" edge with a given conditions (other predicates). func HasEnvironmentToFileDeleteWith(preds ...predicate.FileDelete) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFileDeleteInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileDeleteTable, EnvironmentToFileDeleteColumn), - ) + step := newEnvironmentToFileDeleteStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1077,7 +683,6 @@ func HasEnvironmentToFileExtract() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFileExtractTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileExtractTable, EnvironmentToFileExtractColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1087,11 +692,7 @@ func HasEnvironmentToFileExtract() predicate.Environment { // HasEnvironmentToFileExtractWith applies the HasEdge predicate on the "EnvironmentToFileExtract" edge with a given conditions (other predicates). 
func HasEnvironmentToFileExtractWith(preds ...predicate.FileExtract) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFileExtractInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFileExtractTable, EnvironmentToFileExtractColumn), - ) + step := newEnvironmentToFileExtractStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1105,7 +706,6 @@ func HasEnvironmentToIncludedNetwork() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToIncludedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToIncludedNetworkTable, EnvironmentToIncludedNetworkPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -1115,11 +715,7 @@ func HasEnvironmentToIncludedNetwork() predicate.Environment { // HasEnvironmentToIncludedNetworkWith applies the HasEdge predicate on the "EnvironmentToIncludedNetwork" edge with a given conditions (other predicates). func HasEnvironmentToIncludedNetworkWith(preds ...predicate.IncludedNetwork) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToIncludedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToIncludedNetworkTable, EnvironmentToIncludedNetworkPrimaryKey...), - ) + step := newEnvironmentToIncludedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1133,7 +729,6 @@ func HasEnvironmentToFinding() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFindingTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFindingTable, EnvironmentToFindingColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1143,11 +738,7 @@ func HasEnvironmentToFinding() predicate.Environment { // HasEnvironmentToFindingWith applies the HasEdge predicate on the "EnvironmentToFinding" edge with a given conditions (other predicates). func HasEnvironmentToFindingWith(preds ...predicate.Finding) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToFindingInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToFindingTable, EnvironmentToFindingColumn), - ) + step := newEnvironmentToFindingStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1161,7 +752,6 @@ func HasEnvironmentToDNSRecord() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToDNSRecordTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToDNSRecordTable, EnvironmentToDNSRecordColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1171,11 +761,7 @@ func HasEnvironmentToDNSRecord() predicate.Environment { // HasEnvironmentToDNSRecordWith applies the HasEdge predicate on the "EnvironmentToDNSRecord" edge with a given conditions (other predicates). 
func HasEnvironmentToDNSRecordWith(preds ...predicate.DNSRecord) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToDNSRecordInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToDNSRecordTable, EnvironmentToDNSRecordColumn), - ) + step := newEnvironmentToDNSRecordStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1189,7 +775,6 @@ func HasEnvironmentToDNS() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToDNSTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToDNSTable, EnvironmentToDNSPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -1199,11 +784,7 @@ func HasEnvironmentToDNS() predicate.Environment { // HasEnvironmentToDNSWith applies the HasEdge predicate on the "EnvironmentToDNS" edge with a given conditions (other predicates). func HasEnvironmentToDNSWith(preds ...predicate.DNS) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToDNSInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, EnvironmentToDNSTable, EnvironmentToDNSPrimaryKey...), - ) + step := newEnvironmentToDNSStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1217,7 +798,6 @@ func HasEnvironmentToNetwork() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToNetworkTable, EnvironmentToNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1227,11 +807,7 @@ func HasEnvironmentToNetwork() predicate.Environment { // HasEnvironmentToNetworkWith applies the HasEdge predicate on the "EnvironmentToNetwork" edge with a given conditions (other predicates). func HasEnvironmentToNetworkWith(preds ...predicate.Network) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToNetworkTable, EnvironmentToNetworkColumn), - ) + step := newEnvironmentToNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1245,7 +821,6 @@ func HasEnvironmentToHostDependency() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToHostDependencyTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToHostDependencyTable, EnvironmentToHostDependencyColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1255,11 +830,7 @@ func HasEnvironmentToHostDependency() predicate.Environment { // HasEnvironmentToHostDependencyWith applies the HasEdge predicate on the "EnvironmentToHostDependency" edge with a given conditions (other predicates). 
func HasEnvironmentToHostDependencyWith(preds ...predicate.HostDependency) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToHostDependencyInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToHostDependencyTable, EnvironmentToHostDependencyColumn), - ) + step := newEnvironmentToHostDependencyStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1273,7 +844,6 @@ func HasEnvironmentToAnsible() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToAnsibleTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToAnsibleTable, EnvironmentToAnsibleColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1283,11 +853,7 @@ func HasEnvironmentToAnsible() predicate.Environment { // HasEnvironmentToAnsibleWith applies the HasEdge predicate on the "EnvironmentToAnsible" edge with a given conditions (other predicates). func HasEnvironmentToAnsibleWith(preds ...predicate.Ansible) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToAnsibleInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EnvironmentToAnsibleTable, EnvironmentToAnsibleColumn), - ) + step := newEnvironmentToAnsibleStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1301,7 +867,6 @@ func HasEnvironmentToBuild() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, EnvironmentToBuildTable, EnvironmentToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1311,11 +876,7 @@ func HasEnvironmentToBuild() predicate.Environment { // HasEnvironmentToBuildWith applies the HasEdge predicate on the "EnvironmentToBuild" edge with a given conditions (other predicates). func HasEnvironmentToBuildWith(preds ...predicate.Build) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, EnvironmentToBuildTable, EnvironmentToBuildColumn), - ) + step := newEnvironmentToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1329,7 +890,6 @@ func HasEnvironmentToRepository() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToRepositoryTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, EnvironmentToRepositoryTable, EnvironmentToRepositoryPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -1339,11 +899,7 @@ func HasEnvironmentToRepository() predicate.Environment { // HasEnvironmentToRepositoryWith applies the HasEdge predicate on the "EnvironmentToRepository" edge with a given conditions (other predicates). 
func HasEnvironmentToRepositoryWith(preds ...predicate.Repository) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToRepositoryInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, EnvironmentToRepositoryTable, EnvironmentToRepositoryPrimaryKey...), - ) + step := newEnvironmentToRepositoryStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1357,7 +913,6 @@ func HasEnvironmentToServerTask() predicate.Environment { return predicate.Environment(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToServerTaskTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, EnvironmentToServerTaskTable, EnvironmentToServerTaskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1367,11 +922,7 @@ func HasEnvironmentToServerTask() predicate.Environment { // HasEnvironmentToServerTaskWith applies the HasEdge predicate on the "EnvironmentToServerTask" edge with a given conditions (other predicates). func HasEnvironmentToServerTaskWith(preds ...predicate.ServerTask) predicate.Environment { return predicate.Environment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EnvironmentToServerTaskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, EnvironmentToServerTaskTable, EnvironmentToServerTaskColumn), - ) + step := newEnvironmentToServerTaskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1382,32 +933,15 @@ func HasEnvironmentToServerTaskWith(preds ...predicate.ServerTask) predicate.Env // And groups predicates with the AND operator between them. func And(predicates ...predicate.Environment) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Environment(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Environment) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Environment(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Environment) predicate.Environment { - return predicate.Environment(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Environment(sql.NotPredicates(p)) } diff --git a/ent/environment_create.go b/ent/environment_create.go index b4b1dee7..3cce2af6 100755 --- a/ent/environment_create.go +++ b/ent/environment_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -39,9 +39,9 @@ type EnvironmentCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (ec *EnvironmentCreate) SetHclID(s string) *EnvironmentCreate { - ec.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (ec *EnvironmentCreate) SetHCLID(s string) *EnvironmentCreate { + ec.mutation.SetHCLID(s) return ec } @@ -411,44 +411,8 @@ func (ec *EnvironmentCreate) Mutation() *EnvironmentMutation { // Save creates the Environment in the database. 
func (ec *EnvironmentCreate) Save(ctx context.Context) (*Environment, error) { - var ( - err error - node *Environment - ) ec.defaults() - if len(ec.hooks) == 0 { - if err = ec.check(); err != nil { - return nil, err - } - node, err = ec.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EnvironmentMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ec.check(); err != nil { - return nil, err - } - ec.mutation = mutation - if node, err = ec.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ec.hooks) - 1; i >= 0; i-- { - if ec.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ec.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ec.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, ec.sqlSave, ec.mutation, ec.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -483,7 +447,7 @@ func (ec *EnvironmentCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (ec *EnvironmentCreate) check() error { - if _, ok := ec.mutation.HclID(); !ok { + if _, ok := ec.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Environment.hcl_id"`)} } if _, ok := ec.mutation.CompetitionID(); !ok { @@ -520,10 +484,13 @@ func (ec *EnvironmentCreate) check() error { } func (ec *EnvironmentCreate) sqlSave(ctx context.Context) (*Environment, error) { + if err := ec.check(); err != nil { + return nil, err + } _node, _spec := ec.createSpec() if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -534,110 +501,62 @@ func (ec *EnvironmentCreate) sqlSave(ctx context.Context) (*Environment, error) return nil, err } } + ec.mutation.id = &_node.ID + ec.mutation.done = true return _node, nil } func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { var ( _node = &Environment{config: ec.config} - _spec = &sqlgraph.CreateSpec{ - Table: environment.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(environment.Table, sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID)) ) if id, ok := ec.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := ec.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldHclID, - }) - _node.HclID = value + if value, ok := ec.mutation.HCLID(); ok { + _spec.SetField(environment.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := ec.mutation.CompetitionID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldCompetitionID, - }) + _spec.SetField(environment.FieldCompetitionID, field.TypeString, value) _node.CompetitionID = value } if value, ok := ec.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldName, - }) + _spec.SetField(environment.FieldName, field.TypeString, value) _node.Name = value } if 
value, ok := ec.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldDescription, - }) + _spec.SetField(environment.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := ec.mutation.Builder(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldBuilder, - }) + _spec.SetField(environment.FieldBuilder, field.TypeString, value) _node.Builder = value } if value, ok := ec.mutation.TeamCount(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldTeamCount, - }) + _spec.SetField(environment.FieldTeamCount, field.TypeInt, value) _node.TeamCount = value } if value, ok := ec.mutation.Revision(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldRevision, - }) + _spec.SetField(environment.FieldRevision, field.TypeInt, value) _node.Revision = value } if value, ok := ec.mutation.AdminCidrs(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldAdminCidrs, - }) + _spec.SetField(environment.FieldAdminCidrs, field.TypeJSON, value) _node.AdminCidrs = value } if value, ok := ec.mutation.ExposedVdiPorts(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldExposedVdiPorts, - }) + _spec.SetField(environment.FieldExposedVdiPorts, field.TypeJSON, value) _node.ExposedVdiPorts = value } if value, ok := ec.mutation.Config(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldConfig, - }) + _spec.SetField(environment.FieldConfig, field.TypeJSON, value) _node.Config = value } if value, ok := ec.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldTags, - }) + _spec.SetField(environment.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := ec.mutation.EnvironmentToUserIDs(); len(nodes) > 0 { @@ -648,10 +567,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -667,10 +583,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -686,10 +599,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -705,10 +615,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, 
*sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -724,10 +631,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -743,10 +647,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -762,10 +663,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -781,10 +679,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -800,10 +695,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -819,10 +711,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -838,10 +727,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -857,10 +743,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -876,10 +759,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, 
*sqlgraph.CreateSpec) { Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -895,10 +775,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -914,10 +791,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -933,10 +807,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -952,10 +823,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -971,10 +839,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -990,10 +855,7 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1007,11 +869,15 @@ func (ec *EnvironmentCreate) createSpec() (*Environment, *sqlgraph.CreateSpec) { // EnvironmentCreateBulk is the builder for creating many Environment entities in bulk. type EnvironmentCreateBulk struct { config + err error builders []*EnvironmentCreate } // Save creates the Environment entities in the database. 
func (ecb *EnvironmentCreateBulk) Save(ctx context.Context) ([]*Environment, error) { + if ecb.err != nil { + return nil, ecb.err + } specs := make([]*sqlgraph.CreateSpec, len(ecb.builders)) nodes := make([]*Environment, len(ecb.builders)) mutators := make([]Mutator, len(ecb.builders)) @@ -1028,8 +894,8 @@ func (ecb *EnvironmentCreateBulk) Save(ctx context.Context) ([]*Environment, err return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation) } else { @@ -1037,7 +903,7 @@ func (ecb *EnvironmentCreateBulk) Save(ctx context.Context) ([]*Environment, err // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, ecb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/environment_delete.go b/ent/environment_delete.go index fdb49f24..56b528ff 100755 --- a/ent/environment_delete.go +++ b/ent/environment_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ed *EnvironmentDelete) Where(ps ...predicate.Environment) *EnvironmentDele // Exec executes the deletion query and returns how many vertices were deleted. func (ed *EnvironmentDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ed.hooks) == 0 { - affected, err = ed.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EnvironmentMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ed.mutation = mutation - affected, err = ed.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ed.hooks) - 1; i >= 0; i-- { - if ed.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ed.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ed.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (ed *EnvironmentDelete) ExecX(ctx context.Context) int { } func (ed *EnvironmentDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: environment.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(environment.Table, sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID)) if ps := ed.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (ed *EnvironmentDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, ed.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, ed.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ed.mutation.done = true + return affected, err } // EnvironmentDeleteOne is the builder for deleting a single Environment entity. 
@@ -92,6 +61,12 @@ type EnvironmentDeleteOne struct { ed *EnvironmentDelete } +// Where appends a list predicates to the EnvironmentDelete builder. +func (edo *EnvironmentDeleteOne) Where(ps ...predicate.Environment) *EnvironmentDeleteOne { + edo.ed.mutation.Where(ps...) + return edo +} + // Exec executes the deletion query. func (edo *EnvironmentDeleteOne) Exec(ctx context.Context) error { n, err := edo.ed.Exec(ctx) @@ -107,5 +82,7 @@ func (edo *EnvironmentDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (edo *EnvironmentDeleteOne) ExecX(ctx context.Context) { - edo.ed.ExecX(ctx) + if err := edo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/environment_query.go b/ent/environment_query.go index 3fe9446d..24cc87b4 100755 --- a/ent/environment_query.go +++ b/ent/environment_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -39,32 +38,50 @@ import ( // EnvironmentQuery is the builder for querying Environment entities. type EnvironmentQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Environment - // eager-loading edges. - withEnvironmentToUser *UserQuery - withEnvironmentToHost *HostQuery - withEnvironmentToCompetition *CompetitionQuery - withEnvironmentToIdentity *IdentityQuery - withEnvironmentToCommand *CommandQuery - withEnvironmentToScript *ScriptQuery - withEnvironmentToFileDownload *FileDownloadQuery - withEnvironmentToFileDelete *FileDeleteQuery - withEnvironmentToFileExtract *FileExtractQuery - withEnvironmentToIncludedNetwork *IncludedNetworkQuery - withEnvironmentToFinding *FindingQuery - withEnvironmentToDNSRecord *DNSRecordQuery - withEnvironmentToDNS *DNSQuery - withEnvironmentToNetwork *NetworkQuery - withEnvironmentToHostDependency *HostDependencyQuery - withEnvironmentToAnsible *AnsibleQuery - withEnvironmentToBuild *BuildQuery - withEnvironmentToRepository *RepositoryQuery - withEnvironmentToServerTask *ServerTaskQuery + ctx *QueryContext + order []environment.OrderOption + inters []Interceptor + predicates []predicate.Environment + withEnvironmentToUser *UserQuery + withEnvironmentToHost *HostQuery + withEnvironmentToCompetition *CompetitionQuery + withEnvironmentToIdentity *IdentityQuery + withEnvironmentToCommand *CommandQuery + withEnvironmentToScript *ScriptQuery + withEnvironmentToFileDownload *FileDownloadQuery + withEnvironmentToFileDelete *FileDeleteQuery + withEnvironmentToFileExtract *FileExtractQuery + withEnvironmentToIncludedNetwork *IncludedNetworkQuery + withEnvironmentToFinding *FindingQuery + withEnvironmentToDNSRecord *DNSRecordQuery + withEnvironmentToDNS *DNSQuery + withEnvironmentToNetwork *NetworkQuery + withEnvironmentToHostDependency *HostDependencyQuery + withEnvironmentToAnsible *AnsibleQuery + withEnvironmentToBuild *BuildQuery + withEnvironmentToRepository *RepositoryQuery + withEnvironmentToServerTask *ServerTaskQuery + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Environment) error + withNamedEnvironmentToUser map[string]*UserQuery + withNamedEnvironmentToHost map[string]*HostQuery + withNamedEnvironmentToCompetition map[string]*CompetitionQuery + withNamedEnvironmentToIdentity map[string]*IdentityQuery + withNamedEnvironmentToCommand map[string]*CommandQuery + withNamedEnvironmentToScript map[string]*ScriptQuery + 
withNamedEnvironmentToFileDownload map[string]*FileDownloadQuery + withNamedEnvironmentToFileDelete map[string]*FileDeleteQuery + withNamedEnvironmentToFileExtract map[string]*FileExtractQuery + withNamedEnvironmentToIncludedNetwork map[string]*IncludedNetworkQuery + withNamedEnvironmentToFinding map[string]*FindingQuery + withNamedEnvironmentToDNSRecord map[string]*DNSRecordQuery + withNamedEnvironmentToDNS map[string]*DNSQuery + withNamedEnvironmentToNetwork map[string]*NetworkQuery + withNamedEnvironmentToHostDependency map[string]*HostDependencyQuery + withNamedEnvironmentToAnsible map[string]*AnsibleQuery + withNamedEnvironmentToBuild map[string]*BuildQuery + withNamedEnvironmentToRepository map[string]*RepositoryQuery + withNamedEnvironmentToServerTask map[string]*ServerTaskQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -76,34 +93,34 @@ func (eq *EnvironmentQuery) Where(ps ...predicate.Environment) *EnvironmentQuery return eq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (eq *EnvironmentQuery) Limit(limit int) *EnvironmentQuery { - eq.limit = &limit + eq.ctx.Limit = &limit return eq } -// Offset adds an offset step to the query. +// Offset to start from. func (eq *EnvironmentQuery) Offset(offset int) *EnvironmentQuery { - eq.offset = &offset + eq.ctx.Offset = &offset return eq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (eq *EnvironmentQuery) Unique(unique bool) *EnvironmentQuery { - eq.unique = &unique + eq.ctx.Unique = &unique return eq } -// Order adds an order step to the query. -func (eq *EnvironmentQuery) Order(o ...OrderFunc) *EnvironmentQuery { +// Order specifies how the records should be ordered. +func (eq *EnvironmentQuery) Order(o ...environment.OrderOption) *EnvironmentQuery { eq.order = append(eq.order, o...) return eq } // QueryEnvironmentToUser chains the current query on the "EnvironmentToUser" edge. func (eq *EnvironmentQuery) QueryEnvironmentToUser() *UserQuery { - query := &UserQuery{config: eq.config} + query := (&UserClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -125,7 +142,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToUser() *UserQuery { // QueryEnvironmentToHost chains the current query on the "EnvironmentToHost" edge. func (eq *EnvironmentQuery) QueryEnvironmentToHost() *HostQuery { - query := &HostQuery{config: eq.config} + query := (&HostClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -147,7 +164,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToHost() *HostQuery { // QueryEnvironmentToCompetition chains the current query on the "EnvironmentToCompetition" edge. 
func (eq *EnvironmentQuery) QueryEnvironmentToCompetition() *CompetitionQuery { - query := &CompetitionQuery{config: eq.config} + query := (&CompetitionClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -169,7 +186,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToCompetition() *CompetitionQuery { // QueryEnvironmentToIdentity chains the current query on the "EnvironmentToIdentity" edge. func (eq *EnvironmentQuery) QueryEnvironmentToIdentity() *IdentityQuery { - query := &IdentityQuery{config: eq.config} + query := (&IdentityClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -191,7 +208,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToIdentity() *IdentityQuery { // QueryEnvironmentToCommand chains the current query on the "EnvironmentToCommand" edge. func (eq *EnvironmentQuery) QueryEnvironmentToCommand() *CommandQuery { - query := &CommandQuery{config: eq.config} + query := (&CommandClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -213,7 +230,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToCommand() *CommandQuery { // QueryEnvironmentToScript chains the current query on the "EnvironmentToScript" edge. func (eq *EnvironmentQuery) QueryEnvironmentToScript() *ScriptQuery { - query := &ScriptQuery{config: eq.config} + query := (&ScriptClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -235,7 +252,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToScript() *ScriptQuery { // QueryEnvironmentToFileDownload chains the current query on the "EnvironmentToFileDownload" edge. func (eq *EnvironmentQuery) QueryEnvironmentToFileDownload() *FileDownloadQuery { - query := &FileDownloadQuery{config: eq.config} + query := (&FileDownloadClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -257,7 +274,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToFileDownload() *FileDownloadQuery // QueryEnvironmentToFileDelete chains the current query on the "EnvironmentToFileDelete" edge. func (eq *EnvironmentQuery) QueryEnvironmentToFileDelete() *FileDeleteQuery { - query := &FileDeleteQuery{config: eq.config} + query := (&FileDeleteClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -279,7 +296,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToFileDelete() *FileDeleteQuery { // QueryEnvironmentToFileExtract chains the current query on the "EnvironmentToFileExtract" edge. func (eq *EnvironmentQuery) QueryEnvironmentToFileExtract() *FileExtractQuery { - query := &FileExtractQuery{config: eq.config} + query := (&FileExtractClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -301,7 +318,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToFileExtract() *FileExtractQuery { // QueryEnvironmentToIncludedNetwork chains the current query on the "EnvironmentToIncludedNetwork" edge. 
func (eq *EnvironmentQuery) QueryEnvironmentToIncludedNetwork() *IncludedNetworkQuery { - query := &IncludedNetworkQuery{config: eq.config} + query := (&IncludedNetworkClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -323,7 +340,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToIncludedNetwork() *IncludedNetwork // QueryEnvironmentToFinding chains the current query on the "EnvironmentToFinding" edge. func (eq *EnvironmentQuery) QueryEnvironmentToFinding() *FindingQuery { - query := &FindingQuery{config: eq.config} + query := (&FindingClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -345,7 +362,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToFinding() *FindingQuery { // QueryEnvironmentToDNSRecord chains the current query on the "EnvironmentToDNSRecord" edge. func (eq *EnvironmentQuery) QueryEnvironmentToDNSRecord() *DNSRecordQuery { - query := &DNSRecordQuery{config: eq.config} + query := (&DNSRecordClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -367,7 +384,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToDNSRecord() *DNSRecordQuery { // QueryEnvironmentToDNS chains the current query on the "EnvironmentToDNS" edge. func (eq *EnvironmentQuery) QueryEnvironmentToDNS() *DNSQuery { - query := &DNSQuery{config: eq.config} + query := (&DNSClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -389,7 +406,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToDNS() *DNSQuery { // QueryEnvironmentToNetwork chains the current query on the "EnvironmentToNetwork" edge. func (eq *EnvironmentQuery) QueryEnvironmentToNetwork() *NetworkQuery { - query := &NetworkQuery{config: eq.config} + query := (&NetworkClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -411,7 +428,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToNetwork() *NetworkQuery { // QueryEnvironmentToHostDependency chains the current query on the "EnvironmentToHostDependency" edge. func (eq *EnvironmentQuery) QueryEnvironmentToHostDependency() *HostDependencyQuery { - query := &HostDependencyQuery{config: eq.config} + query := (&HostDependencyClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -433,7 +450,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToHostDependency() *HostDependencyQu // QueryEnvironmentToAnsible chains the current query on the "EnvironmentToAnsible" edge. func (eq *EnvironmentQuery) QueryEnvironmentToAnsible() *AnsibleQuery { - query := &AnsibleQuery{config: eq.config} + query := (&AnsibleClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -455,7 +472,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToAnsible() *AnsibleQuery { // QueryEnvironmentToBuild chains the current query on the "EnvironmentToBuild" edge. 
func (eq *EnvironmentQuery) QueryEnvironmentToBuild() *BuildQuery { - query := &BuildQuery{config: eq.config} + query := (&BuildClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -477,7 +494,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToBuild() *BuildQuery { // QueryEnvironmentToRepository chains the current query on the "EnvironmentToRepository" edge. func (eq *EnvironmentQuery) QueryEnvironmentToRepository() *RepositoryQuery { - query := &RepositoryQuery{config: eq.config} + query := (&RepositoryClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -499,7 +516,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToRepository() *RepositoryQuery { // QueryEnvironmentToServerTask chains the current query on the "EnvironmentToServerTask" edge. func (eq *EnvironmentQuery) QueryEnvironmentToServerTask() *ServerTaskQuery { - query := &ServerTaskQuery{config: eq.config} + query := (&ServerTaskClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -522,7 +539,7 @@ func (eq *EnvironmentQuery) QueryEnvironmentToServerTask() *ServerTaskQuery { // First returns the first Environment entity from the query. // Returns a *NotFoundError when no Environment was found. func (eq *EnvironmentQuery) First(ctx context.Context) (*Environment, error) { - nodes, err := eq.Limit(1).All(ctx) + nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First")) if err != nil { return nil, err } @@ -545,7 +562,7 @@ func (eq *EnvironmentQuery) FirstX(ctx context.Context) *Environment { // Returns a *NotFoundError when no Environment ID was found. func (eq *EnvironmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = eq.Limit(1).IDs(ctx); err != nil { + if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -568,7 +585,7 @@ func (eq *EnvironmentQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Environment entity is found. // Returns a *NotFoundError when no Environment entities are found. func (eq *EnvironmentQuery) Only(ctx context.Context) (*Environment, error) { - nodes, err := eq.Limit(2).All(ctx) + nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only")) if err != nil { return nil, err } @@ -596,7 +613,7 @@ func (eq *EnvironmentQuery) OnlyX(ctx context.Context) *Environment { // Returns a *NotFoundError when no entities are found. func (eq *EnvironmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = eq.Limit(2).IDs(ctx); err != nil { + if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -621,10 +638,12 @@ func (eq *EnvironmentQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Environments. 
func (eq *EnvironmentQuery) All(ctx context.Context) ([]*Environment, error) { + ctx = setContextOp(ctx, eq.ctx, "All") if err := eq.prepareQuery(ctx); err != nil { return nil, err } - return eq.sqlAll(ctx) + qr := querierAll[[]*Environment, *EnvironmentQuery]() + return withInterceptors[[]*Environment](ctx, eq, qr, eq.inters) } // AllX is like All, but panics if an error occurs. @@ -637,9 +656,12 @@ func (eq *EnvironmentQuery) AllX(ctx context.Context) []*Environment { } // IDs executes the query and returns a list of Environment IDs. -func (eq *EnvironmentQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := eq.Select(environment.FieldID).Scan(ctx, &ids); err != nil { +func (eq *EnvironmentQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if eq.ctx.Unique == nil && eq.path != nil { + eq.Unique(true) + } + ctx = setContextOp(ctx, eq.ctx, "IDs") + if err = eq.Select(environment.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -656,10 +678,11 @@ func (eq *EnvironmentQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (eq *EnvironmentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, eq.ctx, "Count") if err := eq.prepareQuery(ctx); err != nil { return 0, err } - return eq.sqlCount(ctx) + return withInterceptors[int](ctx, eq, querierCount[*EnvironmentQuery](), eq.inters) } // CountX is like Count, but panics if an error occurs. @@ -673,10 +696,15 @@ func (eq *EnvironmentQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (eq *EnvironmentQuery) Exist(ctx context.Context) (bool, error) { - if err := eq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, eq.ctx, "Exist") + switch _, err := eq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return eq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -696,9 +724,9 @@ func (eq *EnvironmentQuery) Clone() *EnvironmentQuery { } return &EnvironmentQuery{ config: eq.config, - limit: eq.limit, - offset: eq.offset, - order: append([]OrderFunc{}, eq.order...), + ctx: eq.ctx.Clone(), + order: append([]environment.OrderOption{}, eq.order...), + inters: append([]Interceptor{}, eq.inters...), predicates: append([]predicate.Environment{}, eq.predicates...), withEnvironmentToUser: eq.withEnvironmentToUser.Clone(), withEnvironmentToHost: eq.withEnvironmentToHost.Clone(), @@ -720,16 +748,15 @@ func (eq *EnvironmentQuery) Clone() *EnvironmentQuery { withEnvironmentToRepository: eq.withEnvironmentToRepository.Clone(), withEnvironmentToServerTask: eq.withEnvironmentToServerTask.Clone(), // clone intermediate query. - sql: eq.sql.Clone(), - path: eq.path, - unique: eq.unique, + sql: eq.sql.Clone(), + path: eq.path, } } // WithEnvironmentToUser tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToUser" edge. The optional arguments are used to configure the query builder of the edge. 
func (eq *EnvironmentQuery) WithEnvironmentToUser(opts ...func(*UserQuery)) *EnvironmentQuery { - query := &UserQuery{config: eq.config} + query := (&UserClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -740,7 +767,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToUser(opts ...func(*UserQuery)) *Env // WithEnvironmentToHost tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToHost" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToHost(opts ...func(*HostQuery)) *EnvironmentQuery { - query := &HostQuery{config: eq.config} + query := (&HostClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -751,7 +778,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToHost(opts ...func(*HostQuery)) *Env // WithEnvironmentToCompetition tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToCompetition" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToCompetition(opts ...func(*CompetitionQuery)) *EnvironmentQuery { - query := &CompetitionQuery{config: eq.config} + query := (&CompetitionClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -762,7 +789,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToCompetition(opts ...func(*Competiti // WithEnvironmentToIdentity tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToIdentity" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToIdentity(opts ...func(*IdentityQuery)) *EnvironmentQuery { - query := &IdentityQuery{config: eq.config} + query := (&IdentityClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -773,7 +800,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToIdentity(opts ...func(*IdentityQuer // WithEnvironmentToCommand tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToCommand" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToCommand(opts ...func(*CommandQuery)) *EnvironmentQuery { - query := &CommandQuery{config: eq.config} + query := (&CommandClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -784,7 +811,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToCommand(opts ...func(*CommandQuery) // WithEnvironmentToScript tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToScript" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToScript(opts ...func(*ScriptQuery)) *EnvironmentQuery { - query := &ScriptQuery{config: eq.config} + query := (&ScriptClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -795,7 +822,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToScript(opts ...func(*ScriptQuery)) // WithEnvironmentToFileDownload tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToFileDownload" edge. The optional arguments are used to configure the query builder of the edge. 
func (eq *EnvironmentQuery) WithEnvironmentToFileDownload(opts ...func(*FileDownloadQuery)) *EnvironmentQuery { - query := &FileDownloadQuery{config: eq.config} + query := (&FileDownloadClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -806,7 +833,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToFileDownload(opts ...func(*FileDown // WithEnvironmentToFileDelete tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToFileDelete" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToFileDelete(opts ...func(*FileDeleteQuery)) *EnvironmentQuery { - query := &FileDeleteQuery{config: eq.config} + query := (&FileDeleteClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -817,7 +844,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToFileDelete(opts ...func(*FileDelete // WithEnvironmentToFileExtract tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToFileExtract" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToFileExtract(opts ...func(*FileExtractQuery)) *EnvironmentQuery { - query := &FileExtractQuery{config: eq.config} + query := (&FileExtractClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -828,7 +855,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToFileExtract(opts ...func(*FileExtra // WithEnvironmentToIncludedNetwork tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToIncludedNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToIncludedNetwork(opts ...func(*IncludedNetworkQuery)) *EnvironmentQuery { - query := &IncludedNetworkQuery{config: eq.config} + query := (&IncludedNetworkClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -839,7 +866,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToIncludedNetwork(opts ...func(*Inclu // WithEnvironmentToFinding tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToFinding" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToFinding(opts ...func(*FindingQuery)) *EnvironmentQuery { - query := &FindingQuery{config: eq.config} + query := (&FindingClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -850,7 +877,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToFinding(opts ...func(*FindingQuery) // WithEnvironmentToDNSRecord tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToDNSRecord" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToDNSRecord(opts ...func(*DNSRecordQuery)) *EnvironmentQuery { - query := &DNSRecordQuery{config: eq.config} + query := (&DNSRecordClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -861,7 +888,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToDNSRecord(opts ...func(*DNSRecordQu // WithEnvironmentToDNS tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToDNS" edge. The optional arguments are used to configure the query builder of the edge. 
func (eq *EnvironmentQuery) WithEnvironmentToDNS(opts ...func(*DNSQuery)) *EnvironmentQuery { - query := &DNSQuery{config: eq.config} + query := (&DNSClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -872,7 +899,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToDNS(opts ...func(*DNSQuery)) *Envir // WithEnvironmentToNetwork tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToNetwork(opts ...func(*NetworkQuery)) *EnvironmentQuery { - query := &NetworkQuery{config: eq.config} + query := (&NetworkClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -883,7 +910,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToNetwork(opts ...func(*NetworkQuery) // WithEnvironmentToHostDependency tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToHostDependency" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToHostDependency(opts ...func(*HostDependencyQuery)) *EnvironmentQuery { - query := &HostDependencyQuery{config: eq.config} + query := (&HostDependencyClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -894,7 +921,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToHostDependency(opts ...func(*HostDe // WithEnvironmentToAnsible tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToAnsible" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToAnsible(opts ...func(*AnsibleQuery)) *EnvironmentQuery { - query := &AnsibleQuery{config: eq.config} + query := (&AnsibleClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -905,7 +932,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToAnsible(opts ...func(*AnsibleQuery) // WithEnvironmentToBuild tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToBuild(opts ...func(*BuildQuery)) *EnvironmentQuery { - query := &BuildQuery{config: eq.config} + query := (&BuildClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -916,7 +943,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToBuild(opts ...func(*BuildQuery)) *E // WithEnvironmentToRepository tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToRepository" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EnvironmentQuery) WithEnvironmentToRepository(opts ...func(*RepositoryQuery)) *EnvironmentQuery { - query := &RepositoryQuery{config: eq.config} + query := (&RepositoryClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -927,7 +954,7 @@ func (eq *EnvironmentQuery) WithEnvironmentToRepository(opts ...func(*Repository // WithEnvironmentToServerTask tells the query-builder to eager-load the nodes that are connected to // the "EnvironmentToServerTask" edge. The optional arguments are used to configure the query builder of the edge. 
func (eq *EnvironmentQuery) WithEnvironmentToServerTask(opts ...func(*ServerTaskQuery)) *EnvironmentQuery { - query := &ServerTaskQuery{config: eq.config} + query := (&ServerTaskClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -941,25 +968,21 @@ func (eq *EnvironmentQuery) WithEnvironmentToServerTask(opts ...func(*ServerTask // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Environment.Query(). -// GroupBy(environment.FieldHclID). +// GroupBy(environment.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (eq *EnvironmentQuery) GroupBy(field string, fields ...string) *EnvironmentGroupBy { - group := &EnvironmentGroupBy{config: eq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := eq.prepareQuery(ctx); err != nil { - return nil, err - } - return eq.sqlQuery(ctx), nil - } - return group + eq.ctx.Fields = append([]string{field}, fields...) + grbuild := &EnvironmentGroupBy{build: eq} + grbuild.flds = &eq.ctx.Fields + grbuild.label = environment.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -968,20 +991,37 @@ func (eq *EnvironmentQuery) GroupBy(field string, fields ...string) *Environment // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Environment.Query(). -// Select(environment.FieldHclID). +// Select(environment.FieldHCLID). // Scan(ctx, &v) -// func (eq *EnvironmentQuery) Select(fields ...string) *EnvironmentSelect { - eq.fields = append(eq.fields, fields...) - return &EnvironmentSelect{EnvironmentQuery: eq} + eq.ctx.Fields = append(eq.ctx.Fields, fields...) + sbuild := &EnvironmentSelect{EnvironmentQuery: eq} + sbuild.label = environment.Label + sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a EnvironmentSelect configured with the given aggregations. +func (eq *EnvironmentQuery) Aggregate(fns ...AggregateFunc) *EnvironmentSelect { + return eq.Select().Aggregate(fns...) 
} func (eq *EnvironmentQuery) prepareQuery(ctx context.Context) error { - for _, f := range eq.fields { + for _, inter := range eq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, eq); err != nil { + return err + } + } + } + for _, f := range eq.ctx.Fields { if !environment.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -996,7 +1036,7 @@ func (eq *EnvironmentQuery) prepareQuery(ctx context.Context) error { return nil } -func (eq *EnvironmentQuery) sqlAll(ctx context.Context) ([]*Environment, error) { +func (eq *EnvironmentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Environment, error) { var ( nodes = []*Environment{} _spec = eq.querySpec() @@ -1022,1302 +1062,1472 @@ func (eq *EnvironmentQuery) sqlAll(ctx context.Context) ([]*Environment, error) eq.withEnvironmentToServerTask != nil, } ) - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Environment).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Environment{config: eq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(eq.modifiers) > 0 { + _spec.Modifiers = eq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := eq.withEnvironmentToUser; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Environment, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.EnvironmentToUser = []*User{} - } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Environment) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: environment.EnvironmentToUserTable, - Columns: environment.EnvironmentToUserPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToUserPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, - } - if err := sqlgraph.QueryEdges(ctx, eq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "EnvironmentToUser": %w`, err) - } - query.Where(user.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToUser(ctx, query, nodes, + func(n *Environment) { 
n.Edges.EnvironmentToUser = []*User{} }, + func(n *Environment, e *User) { n.Edges.EnvironmentToUser = append(n.Edges.EnvironmentToUser, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "EnvironmentToUser" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.EnvironmentToUser = append(nodes[i].Edges.EnvironmentToUser, n) - } - } } - if query := eq.withEnvironmentToHost; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToHost = []*Host{} - } - query.withFKs = true - query.Where(predicate.Host(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToHostColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToHost(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToHost = []*Host{} }, + func(n *Environment, e *Host) { n.Edges.EnvironmentToHost = append(n.Edges.EnvironmentToHost, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_host" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToHost = append(node.Edges.EnvironmentToHost, n) - } } - if query := eq.withEnvironmentToCompetition; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToCompetition = []*Competition{} - } - query.withFKs = true - query.Where(predicate.Competition(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToCompetitionColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToCompetition(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToCompetition = []*Competition{} }, + func(n *Environment, e *Competition) { + n.Edges.EnvironmentToCompetition = append(n.Edges.EnvironmentToCompetition, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_competition - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_competition" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_competition" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToCompetition = append(node.Edges.EnvironmentToCompetition, n) - } } - if query := eq.withEnvironmentToIdentity; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToIdentity = []*Identity{} - } - query.withFKs = true - query.Where(predicate.Identity(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToIdentityColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToIdentity(ctx, query, nodes, + func(n *Environment) { 
n.Edges.EnvironmentToIdentity = []*Identity{} }, + func(n *Environment, e *Identity) { + n.Edges.EnvironmentToIdentity = append(n.Edges.EnvironmentToIdentity, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_identity - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_identity" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_identity" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToIdentity = append(node.Edges.EnvironmentToIdentity, n) - } } - if query := eq.withEnvironmentToCommand; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToCommand = []*Command{} - } - query.withFKs = true - query.Where(predicate.Command(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToCommandColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToCommand(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToCommand = []*Command{} }, + func(n *Environment, e *Command) { + n.Edges.EnvironmentToCommand = append(n.Edges.EnvironmentToCommand, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_command - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_command" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_command" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToCommand = append(node.Edges.EnvironmentToCommand, n) - } } - if query := eq.withEnvironmentToScript; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToScript = []*Script{} - } - query.withFKs = true - query.Where(predicate.Script(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToScriptColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToScript(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToScript = []*Script{} }, + func(n *Environment, e *Script) { n.Edges.EnvironmentToScript = append(n.Edges.EnvironmentToScript, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_script - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_script" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_script" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToScript = append(node.Edges.EnvironmentToScript, n) - } } - if query := eq.withEnvironmentToFileDownload; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToFileDownload = []*FileDownload{} - } - query.withFKs = true - query.Where(predicate.FileDownload(func(s *sql.Selector) { - 
s.Where(sql.InValues(environment.EnvironmentToFileDownloadColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToFileDownload(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToFileDownload = []*FileDownload{} }, + func(n *Environment, e *FileDownload) { + n.Edges.EnvironmentToFileDownload = append(n.Edges.EnvironmentToFileDownload, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_file_download - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_file_download" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_download" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToFileDownload = append(node.Edges.EnvironmentToFileDownload, n) - } } - if query := eq.withEnvironmentToFileDelete; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToFileDelete = []*FileDelete{} - } - query.withFKs = true - query.Where(predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToFileDeleteColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToFileDelete(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToFileDelete = []*FileDelete{} }, + func(n *Environment, e *FileDelete) { + n.Edges.EnvironmentToFileDelete = append(n.Edges.EnvironmentToFileDelete, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_file_delete - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_file_delete" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_delete" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToFileDelete = append(node.Edges.EnvironmentToFileDelete, n) - } } - if query := eq.withEnvironmentToFileExtract; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToFileExtract = []*FileExtract{} - } - query.withFKs = true - query.Where(predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToFileExtractColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToFileExtract(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToFileExtract = []*FileExtract{} }, + func(n *Environment, e *FileExtract) { + n.Edges.EnvironmentToFileExtract = append(n.Edges.EnvironmentToFileExtract, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_file_extract - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_file_extract" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_extract" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToFileExtract = append(node.Edges.EnvironmentToFileExtract, n) - } } - if query := 
eq.withEnvironmentToIncludedNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Environment, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.EnvironmentToIncludedNetwork = []*IncludedNetwork{} - } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Environment) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: environment.EnvironmentToIncludedNetworkTable, - Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToIncludedNetworkPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, - } - if err := sqlgraph.QueryEdges(ctx, eq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "EnvironmentToIncludedNetwork": %w`, err) - } - query.Where(includednetwork.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToIncludedNetwork(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToIncludedNetwork = []*IncludedNetwork{} }, + func(n *Environment, e *IncludedNetwork) { + n.Edges.EnvironmentToIncludedNetwork = append(n.Edges.EnvironmentToIncludedNetwork, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "EnvironmentToIncludedNetwork" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.EnvironmentToIncludedNetwork = append(nodes[i].Edges.EnvironmentToIncludedNetwork, n) - } - } } - if query := eq.withEnvironmentToFinding; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToFinding = []*Finding{} - } - query.withFKs = true - query.Where(predicate.Finding(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToFindingColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToFinding(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToFinding = []*Finding{} }, + func(n *Environment, e *Finding) { + n.Edges.EnvironmentToFinding = append(n.Edges.EnvironmentToFinding, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_finding - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_finding" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_finding" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToFinding = append(node.Edges.EnvironmentToFinding, n) - } } - if query := eq.withEnvironmentToDNSRecord; query 
!= nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToDNSRecord = []*DNSRecord{} - } - query.withFKs = true - query.Where(predicate.DNSRecord(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToDNSRecordColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToDNSRecord(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToDNSRecord = []*DNSRecord{} }, + func(n *Environment, e *DNSRecord) { + n.Edges.EnvironmentToDNSRecord = append(n.Edges.EnvironmentToDNSRecord, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_dns_record - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_dns_record" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_dns_record" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToDNSRecord = append(node.Edges.EnvironmentToDNSRecord, n) - } } - if query := eq.withEnvironmentToDNS; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Environment, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.EnvironmentToDNS = []*DNS{} - } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Environment) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: environment.EnvironmentToDNSTable, - Columns: environment.EnvironmentToDNSPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToDNSPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, - } - if err := sqlgraph.QueryEdges(ctx, eq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "EnvironmentToDNS": %w`, err) - } - query.Where(dns.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToDNS(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToDNS = []*DNS{} }, + func(n *Environment, e *DNS) { n.Edges.EnvironmentToDNS = append(n.Edges.EnvironmentToDNS, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "EnvironmentToDNS" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.EnvironmentToDNS = append(nodes[i].Edges.EnvironmentToDNS, n) - } - } } - if query := eq.withEnvironmentToNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - 
nodes[i].Edges.EnvironmentToNetwork = []*Network{} - } - query.withFKs = true - query.Where(predicate.Network(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToNetworkColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToNetwork(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToNetwork = []*Network{} }, + func(n *Environment, e *Network) { + n.Edges.EnvironmentToNetwork = append(n.Edges.EnvironmentToNetwork, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_network - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_network" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_network" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToNetwork = append(node.Edges.EnvironmentToNetwork, n) - } } - if query := eq.withEnvironmentToHostDependency; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToHostDependency = []*HostDependency{} - } - query.withFKs = true - query.Where(predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToHostDependencyColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToHostDependency(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToHostDependency = []*HostDependency{} }, + func(n *Environment, e *HostDependency) { + n.Edges.EnvironmentToHostDependency = append(n.Edges.EnvironmentToHostDependency, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_host_dependency - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_host_dependency" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_host_dependency" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToHostDependency = append(node.Edges.EnvironmentToHostDependency, n) - } } - if query := eq.withEnvironmentToAnsible; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToAnsible = []*Ansible{} - } - query.withFKs = true - query.Where(predicate.Ansible(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToAnsibleColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToAnsible(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToAnsible = []*Ansible{} }, + func(n *Environment, e *Ansible) { + n.Edges.EnvironmentToAnsible = append(n.Edges.EnvironmentToAnsible, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.environment_environment_to_ansible - if fk == nil { - return nil, fmt.Errorf(`foreign-key "environment_environment_to_ansible" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_ansible" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToAnsible = 
append(node.Edges.EnvironmentToAnsible, n) - } } - if query := eq.withEnvironmentToBuild; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToBuild = []*Build{} - } - query.withFKs = true - query.Where(predicate.Build(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToBuildColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := eq.loadEnvironmentToBuild(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToBuild = []*Build{} }, + func(n *Environment, e *Build) { n.Edges.EnvironmentToBuild = append(n.Edges.EnvironmentToBuild, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.build_build_to_environment - if fk == nil { - return nil, fmt.Errorf(`foreign-key "build_build_to_environment" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_environment" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToBuild = append(node.Edges.EnvironmentToBuild, n) - } } - if query := eq.withEnvironmentToRepository; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Environment, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.EnvironmentToRepository = []*Repository{} - } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Environment) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: environment.EnvironmentToRepositoryTable, - Columns: environment.EnvironmentToRepositoryPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToRepositoryPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + if err := eq.loadEnvironmentToRepository(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToRepository = []*Repository{} }, + func(n *Environment, e *Repository) { + n.Edges.EnvironmentToRepository = append(n.Edges.EnvironmentToRepository, e) + }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, eq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "EnvironmentToRepository": %w`, err) + } + if query := eq.withEnvironmentToServerTask; query != nil { + if err := eq.loadEnvironmentToServerTask(ctx, query, nodes, + func(n *Environment) { n.Edges.EnvironmentToServerTask = []*ServerTask{} }, + func(n *Environment, e *ServerTask) { + n.Edges.EnvironmentToServerTask = append(n.Edges.EnvironmentToServerTask, e) + }); err != nil { + return nil, err } - query.Where(repository.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range eq.withNamedEnvironmentToUser 
{ + if err := eq.loadEnvironmentToUser(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToUser(name) }, + func(n *Environment, e *User) { n.appendNamedEnvironmentToUser(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "EnvironmentToRepository" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.EnvironmentToRepository = append(nodes[i].Edges.EnvironmentToRepository, n) - } + } + for name, query := range eq.withNamedEnvironmentToHost { + if err := eq.loadEnvironmentToHost(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToHost(name) }, + func(n *Environment, e *Host) { n.appendNamedEnvironmentToHost(name, e) }); err != nil { + return nil, err } } - - if query := eq.withEnvironmentToServerTask; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Environment) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.EnvironmentToServerTask = []*ServerTask{} - } - query.withFKs = true - query.Where(predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.InValues(environment.EnvironmentToServerTaskColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + for name, query := range eq.withNamedEnvironmentToCompetition { + if err := eq.loadEnvironmentToCompetition(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToCompetition(name) }, + func(n *Environment, e *Competition) { n.appendNamedEnvironmentToCompetition(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.server_task_server_task_to_environment - if fk == nil { - return nil, fmt.Errorf(`foreign-key "server_task_server_task_to_environment" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_environment" returned %v for node %v`, *fk, n.ID) - } - node.Edges.EnvironmentToServerTask = append(node.Edges.EnvironmentToServerTask, n) + } + for name, query := range eq.withNamedEnvironmentToIdentity { + if err := eq.loadEnvironmentToIdentity(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToIdentity(name) }, + func(n *Environment, e *Identity) { n.appendNamedEnvironmentToIdentity(name, e) }); err != nil { + return nil, err } } - - return nodes, nil -} - -func (eq *EnvironmentQuery) sqlCount(ctx context.Context) (int, error) { - _spec := eq.querySpec() - _spec.Node.Columns = eq.fields - if len(eq.fields) > 0 { - _spec.Unique = eq.unique != nil && *eq.unique + for name, query := range eq.withNamedEnvironmentToCommand { + if err := eq.loadEnvironmentToCommand(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToCommand(name) }, + func(n *Environment, e *Command) { n.appendNamedEnvironmentToCommand(name, e) }); err != nil { + return nil, err + } } - return sqlgraph.CountNodes(ctx, eq.driver, _spec) -} - -func (eq *EnvironmentQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := eq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + for name, query := range eq.withNamedEnvironmentToScript { + if err := eq.loadEnvironmentToScript(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToScript(name) }, + func(n *Environment, e *Script) { n.appendNamedEnvironmentToScript(name, e) }); err != nil { + return nil, err + } } - return n > 0, nil -} - 
-func (eq *EnvironmentQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: environment.Table, - Columns: environment.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, - }, - From: eq.sql, - Unique: true, - } - if unique := eq.unique; unique != nil { - _spec.Unique = *unique + for name, query := range eq.withNamedEnvironmentToFileDownload { + if err := eq.loadEnvironmentToFileDownload(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToFileDownload(name) }, + func(n *Environment, e *FileDownload) { n.appendNamedEnvironmentToFileDownload(name, e) }); err != nil { + return nil, err + } } - if fields := eq.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, environment.FieldID) - for i := range fields { - if fields[i] != environment.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } + for name, query := range eq.withNamedEnvironmentToFileDelete { + if err := eq.loadEnvironmentToFileDelete(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToFileDelete(name) }, + func(n *Environment, e *FileDelete) { n.appendNamedEnvironmentToFileDelete(name, e) }); err != nil { + return nil, err } } - if ps := eq.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } + for name, query := range eq.withNamedEnvironmentToFileExtract { + if err := eq.loadEnvironmentToFileExtract(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToFileExtract(name) }, + func(n *Environment, e *FileExtract) { n.appendNamedEnvironmentToFileExtract(name, e) }); err != nil { + return nil, err } } - if limit := eq.limit; limit != nil { - _spec.Limit = *limit + for name, query := range eq.withNamedEnvironmentToIncludedNetwork { + if err := eq.loadEnvironmentToIncludedNetwork(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToIncludedNetwork(name) }, + func(n *Environment, e *IncludedNetwork) { n.appendNamedEnvironmentToIncludedNetwork(name, e) }); err != nil { + return nil, err + } } - if offset := eq.offset; offset != nil { - _spec.Offset = *offset + for name, query := range eq.withNamedEnvironmentToFinding { + if err := eq.loadEnvironmentToFinding(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToFinding(name) }, + func(n *Environment, e *Finding) { n.appendNamedEnvironmentToFinding(name, e) }); err != nil { + return nil, err + } } - if ps := eq.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } + for name, query := range eq.withNamedEnvironmentToDNSRecord { + if err := eq.loadEnvironmentToDNSRecord(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToDNSRecord(name) }, + func(n *Environment, e *DNSRecord) { n.appendNamedEnvironmentToDNSRecord(name, e) }); err != nil { + return nil, err } } - return _spec -} - -func (eq *EnvironmentQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(eq.driver.Dialect()) - t1 := builder.Table(environment.Table) - columns := eq.fields - if len(columns) == 0 { - columns = environment.Columns + for name, query := range eq.withNamedEnvironmentToDNS { + if err := eq.loadEnvironmentToDNS(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToDNS(name) }, + func(n *Environment, e *DNS) { n.appendNamedEnvironmentToDNS(name, e) }); err != 
nil { + return nil, err + } } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if eq.sql != nil { - selector = eq.sql - selector.Select(selector.Columns(columns...)...) + for name, query := range eq.withNamedEnvironmentToNetwork { + if err := eq.loadEnvironmentToNetwork(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToNetwork(name) }, + func(n *Environment, e *Network) { n.appendNamedEnvironmentToNetwork(name, e) }); err != nil { + return nil, err + } } - if eq.unique != nil && *eq.unique { - selector.Distinct() + for name, query := range eq.withNamedEnvironmentToHostDependency { + if err := eq.loadEnvironmentToHostDependency(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToHostDependency(name) }, + func(n *Environment, e *HostDependency) { n.appendNamedEnvironmentToHostDependency(name, e) }); err != nil { + return nil, err + } } - for _, p := range eq.predicates { - p(selector) + for name, query := range eq.withNamedEnvironmentToAnsible { + if err := eq.loadEnvironmentToAnsible(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToAnsible(name) }, + func(n *Environment, e *Ansible) { n.appendNamedEnvironmentToAnsible(name, e) }); err != nil { + return nil, err + } } - for _, p := range eq.order { - p(selector) + for name, query := range eq.withNamedEnvironmentToBuild { + if err := eq.loadEnvironmentToBuild(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToBuild(name) }, + func(n *Environment, e *Build) { n.appendNamedEnvironmentToBuild(name, e) }); err != nil { + return nil, err + } } - if offset := eq.offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) + for name, query := range eq.withNamedEnvironmentToRepository { + if err := eq.loadEnvironmentToRepository(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToRepository(name) }, + func(n *Environment, e *Repository) { n.appendNamedEnvironmentToRepository(name, e) }); err != nil { + return nil, err + } } - if limit := eq.limit; limit != nil { - selector.Limit(*limit) + for name, query := range eq.withNamedEnvironmentToServerTask { + if err := eq.loadEnvironmentToServerTask(ctx, query, nodes, + func(n *Environment) { n.appendNamedEnvironmentToServerTask(name) }, + func(n *Environment, e *ServerTask) { n.appendNamedEnvironmentToServerTask(name, e) }); err != nil { + return nil, err + } } - return selector -} - -// EnvironmentGroupBy is the group-by builder for Environment entities. -type EnvironmentGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (egb *EnvironmentGroupBy) Aggregate(fns ...AggregateFunc) *EnvironmentGroupBy { - egb.fns = append(egb.fns, fns...) - return egb + for i := range eq.loadTotal { + if err := eq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil } -// Scan applies the group-by query and scans the result into the given value. 
-func (egb *EnvironmentGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := egb.path(ctx) - if err != nil { +func (eq *EnvironmentQuery) loadEnvironmentToUser(ctx context.Context, query *UserQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *User)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Environment) + nids := make(map[uuid.UUID]map[*Environment]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(environment.EnvironmentToUserTable) + s.Join(joinT).On(s.C(user.FieldID), joinT.C(environment.EnvironmentToUserPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(environment.EnvironmentToUserPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(environment.EnvironmentToUserPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { return err } - egb.sql = query - return egb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (egb *EnvironmentGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := egb.Scan(ctx, v); err != nil { - panic(err) + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Environment]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*User](ctx, query, qr, query.inters) + if err != nil { + return err } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "EnvironmentToUser" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil } - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. 
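For callers nothing changes with this refactor: the per-edge load helpers above are internal to sqlAll, and eager-loading is still driven by the generated With<Edge> builders that populate Edges. A minimal usage sketch, assuming the standard ent-generated WithEnvironmentToUser and WithEnvironmentToHost builders on EnvironmentQuery (they are not shown in this hunk):

```go
package example // illustrative sketch, not part of this diff

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
)

// listEnvironments eager-loads two edges in a single query pass; the batching
// is performed by the generated loadEnvironmentToUser/loadEnvironmentToHost helpers.
func listEnvironments(ctx context.Context, client *ent.Client) error {
	envs, err := client.Environment.Query().
		WithEnvironmentToUser(). // assumed standard With<Edge> builder
		WithEnvironmentToHost().
		All(ctx)
	if err != nil {
		return err
	}
	for _, e := range envs {
		fmt.Printf("%s: %d users, %d hosts\n",
			e.ID, len(e.Edges.EnvironmentToUser), len(e.Edges.EnvironmentToHost))
	}
	return nil
}
```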
-func (egb *EnvironmentGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(egb.fields) > 1 { - return nil, errors.New("ent: EnvironmentGroupBy.Strings is not achievable when grouping more than 1 field") +func (eq *EnvironmentQuery) loadEnvironmentToHost(ctx context.Context, query *HostQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Host)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Host(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToHostColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - var v []string - if err := egb.Scan(ctx, &v); err != nil { - return nil, err + for _, n := range neighbors { + fk := n.environment_environment_to_host + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_host" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_host" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return v, nil + return nil } - -// StringsX is like Strings, but panics if an error occurs. -func (egb *EnvironmentGroupBy) StringsX(ctx context.Context) []string { - v, err := egb.Strings(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToCompetition(ctx context.Context, query *CompetitionQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Competition)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Competition(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToCompetitionColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err + } + for _, n := range neighbors { + fk := n.environment_environment_to_competition + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_competition" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_competition" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return v + return nil } - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (egb *EnvironmentGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = egb.Strings(ctx); err != nil { - return +func (eq *EnvironmentQuery) loadEnvironmentToIdentity(ctx context.Context, query *IdentityQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Identity)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Identity(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToIdentityColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentGroupBy.Strings returned %d results when one was expected", len(v)) + for _, n := range neighbors { + fk := n.environment_environment_to_identity + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_identity" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_identity" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return + return nil } - -// StringX is like String, but panics if an error occurs. -func (egb *EnvironmentGroupBy) StringX(ctx context.Context) string { - v, err := egb.String(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToCommand(ctx context.Context, query *CommandQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Command)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Command(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToCommandColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + fk := n.environment_environment_to_command + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_command" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_command" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (egb *EnvironmentGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(egb.fields) > 1 { - return nil, errors.New("ent: EnvironmentGroupBy.Ints is not achievable when grouping more than 1 field") +func (eq *EnvironmentQuery) loadEnvironmentToScript(ctx context.Context, query *ScriptQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Script)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Script(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToScriptColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - var v []int - if err := egb.Scan(ctx, &v); err != nil { - return nil, err + for _, n := range neighbors { + fk := n.environment_environment_to_script + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_script" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_script" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return v, nil + return nil } - -// IntsX is like Ints, but panics if an error occurs. -func (egb *EnvironmentGroupBy) IntsX(ctx context.Context) []int { - v, err := egb.Ints(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToFileDownload(ctx context.Context, query *FileDownloadQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *FileDownload)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.FileDownload(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToFileDownloadColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + fk := n.environment_environment_to_file_download + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_file_download" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_file_download" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (egb *EnvironmentGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = egb.Ints(ctx); err != nil { - return +func (eq *EnvironmentQuery) loadEnvironmentToFileDelete(ctx context.Context, query *FileDeleteQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *FileDelete)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.FileDelete(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToFileDeleteColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentGroupBy.Ints returned %d results when one was expected", len(v)) + for _, n := range neighbors { + fk := n.environment_environment_to_file_delete + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_file_delete" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_file_delete" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return + return nil } - -// IntX is like Int, but panics if an error occurs. -func (egb *EnvironmentGroupBy) IntX(ctx context.Context) int { - v, err := egb.Int(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToFileExtract(ctx context.Context, query *FileExtractQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *FileExtract)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.FileExtract(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToFileExtractColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err + } + for _, n := range neighbors { + fk := n.environment_environment_to_file_extract + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_file_extract" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_file_extract" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return v + return nil } - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
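The one-to-many helpers above (loadEnvironmentToHost through loadEnvironmentToFileExtract, and the remaining ones below) all share the same batch pattern: collect the parent IDs, fetch every child whose foreign key is in that set, then hand each child back to its parent. A condensed, generic sketch of that pattern, with illustrative names only:

```go
package example // illustrative sketch, not generated code

import "fmt"

// loadByFK groups pre-fetched children back onto their parents by foreign key.
// children is assumed to be the result of one query: WHERE fk IN (parent keys).
func loadByFK[P, C any, K comparable](
	parents []*P,
	parentKey func(*P) K,
	children []*C,
	childFK func(*C) (K, bool), // false when the FK column was NULL
	assign func(parent *P, child *C),
) error {
	byID := make(map[K]*P, len(parents))
	for _, p := range parents {
		byID[parentKey(p)] = p
	}
	for _, c := range children {
		fk, ok := childFK(c)
		if !ok {
			return fmt.Errorf("child is missing its foreign key")
		}
		parent, ok := byID[fk]
		if !ok {
			return fmt.Errorf("unexpected foreign key %v", fk)
		}
		assign(parent, c)
	}
	return nil
}
```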
-func (egb *EnvironmentGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(egb.fields) > 1 { - return nil, errors.New("ent: EnvironmentGroupBy.Float64s is not achievable when grouping more than 1 field") +func (eq *EnvironmentQuery) loadEnvironmentToIncludedNetwork(ctx context.Context, query *IncludedNetworkQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *IncludedNetwork)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Environment) + nids := make(map[uuid.UUID]map[*Environment]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(environment.EnvironmentToIncludedNetworkTable) + s.Join(joinT).On(s.C(includednetwork.FieldID), joinT.C(environment.EnvironmentToIncludedNetworkPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(environment.EnvironmentToIncludedNetworkPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(environment.EnvironmentToIncludedNetworkPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err } - var v []float64 - if err := egb.Scan(ctx, &v); err != nil { - return nil, err + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Environment]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*IncludedNetwork](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "EnvironmentToIncludedNetwork" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } } - return v, nil + return nil } - -// Float64sX is like Float64s, but panics if an error occurs. 
-func (egb *EnvironmentGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := egb.Float64s(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToFinding(ctx context.Context, query *FindingQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Finding)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Finding(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToFindingColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + fk := n.environment_environment_to_finding + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_finding" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_finding" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (egb *EnvironmentGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = egb.Float64s(ctx); err != nil { - return +func (eq *EnvironmentQuery) loadEnvironmentToDNSRecord(ctx context.Context, query *DNSRecordQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *DNSRecord)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.DNSRecord(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToDNSRecordColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentGroupBy.Float64s returned %d results when one was expected", len(v)) + for _, n := range neighbors { + fk := n.environment_environment_to_dns_record + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_dns_record" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_dns_record" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return + return nil } - -// Float64X is like Float64, but panics if an error occurs. 
-func (egb *EnvironmentGroupBy) Float64X(ctx context.Context) float64 { - v, err := egb.Float64(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToDNS(ctx context.Context, query *DNSQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *DNS)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Environment) + nids := make(map[uuid.UUID]map[*Environment]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(environment.EnvironmentToDNSTable) + s.Join(joinT).On(s.C(dns.FieldID), joinT.C(environment.EnvironmentToDNSPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(environment.EnvironmentToDNSPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(environment.EnvironmentToDNSPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Environment]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*DNS](ctx, query, qr, query.inters) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "EnvironmentToDNS" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil } - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (egb *EnvironmentGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(egb.fields) > 1 { - return nil, errors.New("ent: EnvironmentGroupBy.Bools is not achievable when grouping more than 1 field") +func (eq *EnvironmentQuery) loadEnvironmentToNetwork(ctx context.Context, query *NetworkQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Network)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Network(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToNetworkColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - var v []bool - if err := egb.Scan(ctx, &v); err != nil { - return nil, err + for _, n := range neighbors { + fk := n.environment_environment_to_network + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_network" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_network" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return v, nil + return nil } - -// BoolsX is like Bools, but panics if an error occurs. -func (egb *EnvironmentGroupBy) BoolsX(ctx context.Context) []bool { - v, err := egb.Bools(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToHostDependency(ctx context.Context, query *HostDependencyQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *HostDependency)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.HostDependency(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToHostDependencyColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + fk := n.environment_environment_to_host_dependency + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_host_dependency" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_host_dependency" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. 
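The many-to-many helpers (loadEnvironmentToUser, loadEnvironmentToIncludedNetwork, loadEnvironmentToDNS, and loadEnvironmentToRepository below) replace the old sqlgraph.QueryEdges two-step with a single joined query: the edge table is joined in, its Environment-ID column is prepended to the selected columns, and the ScanValues/Assign overrides peel that column off so one neighbor can be fanned out to every owning Environment. A stripped-down sketch of that fan-out step, with hypothetical row and accessor names:

```go
package example // illustrative sketch; joinRow and ownerID are hypothetical, not generated API

import (
	"fmt"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// joinRow models one scanned row: the owning Environment ID peeled off the
// prepended join-table column, plus the decoded neighbor entity.
type joinRow struct {
	ownerID  uuid.UUID
	neighbor *ent.DNS
}

// fanOut assigns each distinct neighbor to every Environment that owns it.
func fanOut(rows []joinRow, byID map[uuid.UUID]*ent.Environment,
	assign func(*ent.Environment, *ent.DNS)) error {
	owners := make(map[uuid.UUID][]*ent.Environment) // neighbor ID -> owning environments
	neighbors := make(map[uuid.UUID]*ent.DNS)
	for _, r := range rows {
		owner, ok := byID[r.ownerID]
		if !ok {
			return fmt.Errorf("unexpected owner id %v in join rows", r.ownerID)
		}
		owners[r.neighbor.ID] = append(owners[r.neighbor.ID], owner)
		neighbors[r.neighbor.ID] = r.neighbor
	}
	for id, n := range neighbors {
		for _, owner := range owners[id] {
			assign(owner, n)
		}
	}
	return nil
}
```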
-func (egb *EnvironmentGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = egb.Bools(ctx); err != nil { - return +func (eq *EnvironmentQuery) loadEnvironmentToAnsible(ctx context.Context, query *AnsibleQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Ansible)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Ansible(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToAnsibleColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentGroupBy.Bools returned %d results when one was expected", len(v)) + for _, n := range neighbors { + fk := n.environment_environment_to_ansible + if fk == nil { + return fmt.Errorf(`foreign-key "environment_environment_to_ansible" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "environment_environment_to_ansible" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return + return nil } - -// BoolX is like Bool, but panics if an error occurs. -func (egb *EnvironmentGroupBy) BoolX(ctx context.Context) bool { - v, err := egb.Bool(ctx) +func (eq *EnvironmentQuery) loadEnvironmentToBuild(ctx context.Context, query *BuildQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Build)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Build(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToBuildColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v -} - -func (egb *EnvironmentGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range egb.fields { - if !environment.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + for _, n := range neighbors { + fk := n.build_build_to_environment + if fk == nil { + return fmt.Errorf(`foreign-key "build_build_to_environment" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "build_build_to_environment" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - selector := egb.sqlQuery() - if err := selector.Err(); err != nil { + return nil +} +func (eq *EnvironmentQuery) loadEnvironmentToRepository(ctx context.Context, query *RepositoryQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *Repository)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Environment) + nids := make(map[uuid.UUID]map[*Environment]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(environment.EnvironmentToRepositoryTable) + s.Join(joinT).On(s.C(repository.FieldID), 
joinT.C(environment.EnvironmentToRepositoryPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(environment.EnvironmentToRepositoryPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(environment.EnvironmentToRepositoryPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { return err } - rows := &sql.Rows{} - query, args := selector.Query() - if err := egb.driver.Query(ctx, query, args, rows); err != nil { + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Environment]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Repository](ctx, query, qr, query.inters) + if err != nil { return err } - defer rows.Close() - return sql.ScanSlice(rows, v) + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "EnvironmentToRepository" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (eq *EnvironmentQuery) loadEnvironmentToServerTask(ctx context.Context, query *ServerTaskQuery, nodes []*Environment, init func(*Environment), assign func(*Environment, *ServerTask)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Environment) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.ServerTask(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(environment.EnvironmentToServerTaskColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.server_task_server_task_to_environment + if fk == nil { + return fmt.Errorf(`foreign-key "server_task_server_task_to_environment" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "server_task_server_task_to_environment" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } -func (egb *EnvironmentGroupBy) sqlQuery() *sql.Selector { - selector := egb.sql.Select() - aggregation := make([]string, 0, len(egb.fns)) - for _, fn := range egb.fns { - aggregation = append(aggregation, fn(selector)) +func (eq *EnvironmentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := eq.querySpec() + if len(eq.modifiers) > 0 { + _spec.Modifiers = eq.modifiers } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(egb.fields)+len(egb.fns)) - for _, f := range egb.fields { - columns = append(columns, selector.C(f)) + _spec.Node.Columns = eq.ctx.Fields + if len(eq.ctx.Fields) > 0 { + _spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, eq.driver, _spec) +} + +func (eq *EnvironmentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(environment.Table, environment.Columns, sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID)) + _spec.From = eq.sql + if unique := eq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if eq.path != nil { + _spec.Unique = true + } + if fields := eq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, environment.FieldID) + for i := range fields { + if fields[i] != environment.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := eq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } } - columns = append(columns, aggregation...) - selector.Select(columns...) } - return selector.GroupBy(selector.Columns(egb.fields...)...) + if limit := eq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := eq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := eq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec } -// EnvironmentSelect is the builder for selecting fields of Environment entities. -type EnvironmentSelect struct { - *EnvironmentQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector +func (eq *EnvironmentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(eq.driver.Dialect()) + t1 := builder.Table(environment.Table) + columns := eq.ctx.Fields + if len(columns) == 0 { + columns = environment.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if eq.sql != nil { + selector = eq.sql + selector.Select(selector.Columns(columns...)...) + } + if eq.ctx.Unique != nil && *eq.ctx.Unique { + selector.Distinct() + } + for _, p := range eq.predicates { + p(selector) + } + for _, p := range eq.order { + p(selector) + } + if offset := eq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := eq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector } -// Scan applies the selector query and scans the result into the given value. -func (es *EnvironmentSelect) Scan(ctx context.Context, v interface{}) error { - if err := es.prepareQuery(ctx); err != nil { - return err +// WithNamedEnvironmentToUser tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToUser" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (eq *EnvironmentQuery) WithNamedEnvironmentToUser(name string, opts ...func(*UserQuery)) *EnvironmentQuery { + query := (&UserClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToUser == nil { + eq.withNamedEnvironmentToUser = make(map[string]*UserQuery) } - es.sql = es.EnvironmentQuery.sqlQuery(ctx) - return es.sqlScan(ctx, v) + eq.withNamedEnvironmentToUser[name] = query + return eq } -// ScanX is like Scan, but panics if an error occurs. -func (es *EnvironmentSelect) ScanX(ctx context.Context, v interface{}) { - if err := es.Scan(ctx, v); err != nil { - panic(err) +// WithNamedEnvironmentToHost tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToHost" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToHost(name string, opts ...func(*HostQuery)) *EnvironmentQuery { + query := (&HostClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToHost == nil { + eq.withNamedEnvironmentToHost = make(map[string]*HostQuery) } + eq.withNamedEnvironmentToHost[name] = query + return eq } -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (es *EnvironmentSelect) Strings(ctx context.Context) ([]string, error) { - if len(es.fields) > 1 { - return nil, errors.New("ent: EnvironmentSelect.Strings is not achievable when selecting more than 1 field") +// WithNamedEnvironmentToCompetition tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToCompetition" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToCompetition(name string, opts ...func(*CompetitionQuery)) *EnvironmentQuery { + query := (&CompetitionClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []string - if err := es.Scan(ctx, &v); err != nil { - return nil, err + if eq.withNamedEnvironmentToCompetition == nil { + eq.withNamedEnvironmentToCompetition = make(map[string]*CompetitionQuery) } - return v, nil + eq.withNamedEnvironmentToCompetition[name] = query + return eq } -// StringsX is like Strings, but panics if an error occurs. -func (es *EnvironmentSelect) StringsX(ctx context.Context) []string { - v, err := es.Strings(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToIdentity tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToIdentity" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToIdentity(name string, opts ...func(*IdentityQuery)) *EnvironmentQuery { + query := (&IdentityClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToIdentity == nil { + eq.withNamedEnvironmentToIdentity = make(map[string]*IdentityQuery) } - return v + eq.withNamedEnvironmentToIdentity[name] = query + return eq } -// String returns a single string from a selector. It is only allowed when selecting one field. 
-func (es *EnvironmentSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = es.Strings(ctx); err != nil { - return +// WithNamedEnvironmentToCommand tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToCommand" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToCommand(name string, opts ...func(*CommandQuery)) *EnvironmentQuery { + query := (&CommandClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentSelect.Strings returned %d results when one was expected", len(v)) + if eq.withNamedEnvironmentToCommand == nil { + eq.withNamedEnvironmentToCommand = make(map[string]*CommandQuery) } - return + eq.withNamedEnvironmentToCommand[name] = query + return eq } -// StringX is like String, but panics if an error occurs. -func (es *EnvironmentSelect) StringX(ctx context.Context) string { - v, err := es.String(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToScript tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToScript" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToScript(name string, opts ...func(*ScriptQuery)) *EnvironmentQuery { + query := (&ScriptClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToScript == nil { + eq.withNamedEnvironmentToScript = make(map[string]*ScriptQuery) } - return v + eq.withNamedEnvironmentToScript[name] = query + return eq } -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (es *EnvironmentSelect) Ints(ctx context.Context) ([]int, error) { - if len(es.fields) > 1 { - return nil, errors.New("ent: EnvironmentSelect.Ints is not achievable when selecting more than 1 field") +// WithNamedEnvironmentToFileDownload tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToFileDownload" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToFileDownload(name string, opts ...func(*FileDownloadQuery)) *EnvironmentQuery { + query := (&FileDownloadClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []int - if err := es.Scan(ctx, &v); err != nil { - return nil, err + if eq.withNamedEnvironmentToFileDownload == nil { + eq.withNamedEnvironmentToFileDownload = make(map[string]*FileDownloadQuery) } - return v, nil + eq.withNamedEnvironmentToFileDownload[name] = query + return eq } -// IntsX is like Ints, but panics if an error occurs. -func (es *EnvironmentSelect) IntsX(ctx context.Context) []int { - v, err := es.Ints(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToFileDelete tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToFileDelete" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (eq *EnvironmentQuery) WithNamedEnvironmentToFileDelete(name string, opts ...func(*FileDeleteQuery)) *EnvironmentQuery { + query := (&FileDeleteClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToFileDelete == nil { + eq.withNamedEnvironmentToFileDelete = make(map[string]*FileDeleteQuery) } - return v + eq.withNamedEnvironmentToFileDelete[name] = query + return eq } -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (es *EnvironmentSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = es.Ints(ctx); err != nil { - return +// WithNamedEnvironmentToFileExtract tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToFileExtract" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToFileExtract(name string, opts ...func(*FileExtractQuery)) *EnvironmentQuery { + query := (&FileExtractClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentSelect.Ints returned %d results when one was expected", len(v)) + if eq.withNamedEnvironmentToFileExtract == nil { + eq.withNamedEnvironmentToFileExtract = make(map[string]*FileExtractQuery) } - return + eq.withNamedEnvironmentToFileExtract[name] = query + return eq } -// IntX is like Int, but panics if an error occurs. -func (es *EnvironmentSelect) IntX(ctx context.Context) int { - v, err := es.Int(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToIncludedNetwork tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToIncludedNetwork" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToIncludedNetwork(name string, opts ...func(*IncludedNetworkQuery)) *EnvironmentQuery { + query := (&IncludedNetworkClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToIncludedNetwork == nil { + eq.withNamedEnvironmentToIncludedNetwork = make(map[string]*IncludedNetworkQuery) } - return v + eq.withNamedEnvironmentToIncludedNetwork[name] = query + return eq } -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (es *EnvironmentSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(es.fields) > 1 { - return nil, errors.New("ent: EnvironmentSelect.Float64s is not achievable when selecting more than 1 field") +// WithNamedEnvironmentToFinding tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToFinding" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (eq *EnvironmentQuery) WithNamedEnvironmentToFinding(name string, opts ...func(*FindingQuery)) *EnvironmentQuery { + query := (&FindingClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []float64 - if err := es.Scan(ctx, &v); err != nil { - return nil, err + if eq.withNamedEnvironmentToFinding == nil { + eq.withNamedEnvironmentToFinding = make(map[string]*FindingQuery) } - return v, nil + eq.withNamedEnvironmentToFinding[name] = query + return eq } -// Float64sX is like Float64s, but panics if an error occurs. -func (es *EnvironmentSelect) Float64sX(ctx context.Context) []float64 { - v, err := es.Float64s(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToDNSRecord tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToDNSRecord" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToDNSRecord(name string, opts ...func(*DNSRecordQuery)) *EnvironmentQuery { + query := (&DNSRecordClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToDNSRecord == nil { + eq.withNamedEnvironmentToDNSRecord = make(map[string]*DNSRecordQuery) } - return v + eq.withNamedEnvironmentToDNSRecord[name] = query + return eq } -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (es *EnvironmentSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = es.Float64s(ctx); err != nil { - return +// WithNamedEnvironmentToDNS tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToDNS" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToDNS(name string, opts ...func(*DNSQuery)) *EnvironmentQuery { + query := (&DNSClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentSelect.Float64s returned %d results when one was expected", len(v)) + if eq.withNamedEnvironmentToDNS == nil { + eq.withNamedEnvironmentToDNS = make(map[string]*DNSQuery) } - return + eq.withNamedEnvironmentToDNS[name] = query + return eq } -// Float64X is like Float64, but panics if an error occurs. -func (es *EnvironmentSelect) Float64X(ctx context.Context) float64 { - v, err := es.Float64(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToNetwork tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToNetwork" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToNetwork(name string, opts ...func(*NetworkQuery)) *EnvironmentQuery { + query := (&NetworkClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToNetwork == nil { + eq.withNamedEnvironmentToNetwork = make(map[string]*NetworkQuery) } - return v + eq.withNamedEnvironmentToNetwork[name] = query + return eq } -// Bools returns list of bools from a selector. It is only allowed when selecting one field. 
-func (es *EnvironmentSelect) Bools(ctx context.Context) ([]bool, error) { - if len(es.fields) > 1 { - return nil, errors.New("ent: EnvironmentSelect.Bools is not achievable when selecting more than 1 field") +// WithNamedEnvironmentToHostDependency tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToHostDependency" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToHostDependency(name string, opts ...func(*HostDependencyQuery)) *EnvironmentQuery { + query := (&HostDependencyClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []bool - if err := es.Scan(ctx, &v); err != nil { - return nil, err + if eq.withNamedEnvironmentToHostDependency == nil { + eq.withNamedEnvironmentToHostDependency = make(map[string]*HostDependencyQuery) } - return v, nil + eq.withNamedEnvironmentToHostDependency[name] = query + return eq } -// BoolsX is like Bools, but panics if an error occurs. -func (es *EnvironmentSelect) BoolsX(ctx context.Context) []bool { - v, err := es.Bools(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToAnsible tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToAnsible" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToAnsible(name string, opts ...func(*AnsibleQuery)) *EnvironmentQuery { + query := (&AnsibleClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToAnsible == nil { + eq.withNamedEnvironmentToAnsible = make(map[string]*AnsibleQuery) } - return v + eq.withNamedEnvironmentToAnsible[name] = query + return eq } -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (es *EnvironmentSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = es.Bools(ctx); err != nil { - return +// WithNamedEnvironmentToBuild tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToBuild" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToBuild(name string, opts ...func(*BuildQuery)) *EnvironmentQuery { + query := (&BuildClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{environment.Label} - default: - err = fmt.Errorf("ent: EnvironmentSelect.Bools returned %d results when one was expected", len(v)) + if eq.withNamedEnvironmentToBuild == nil { + eq.withNamedEnvironmentToBuild = make(map[string]*BuildQuery) } - return + eq.withNamedEnvironmentToBuild[name] = query + return eq } -// BoolX is like Bool, but panics if an error occurs. -func (es *EnvironmentSelect) BoolX(ctx context.Context) bool { - v, err := es.Bool(ctx) - if err != nil { - panic(err) +// WithNamedEnvironmentToRepository tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToRepository" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (eq *EnvironmentQuery) WithNamedEnvironmentToRepository(name string, opts ...func(*RepositoryQuery)) *EnvironmentQuery { + query := (&RepositoryClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) } - return v + if eq.withNamedEnvironmentToRepository == nil { + eq.withNamedEnvironmentToRepository = make(map[string]*RepositoryQuery) + } + eq.withNamedEnvironmentToRepository[name] = query + return eq +} + +// WithNamedEnvironmentToServerTask tells the query-builder to eager-load the nodes that are connected to the "EnvironmentToServerTask" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (eq *EnvironmentQuery) WithNamedEnvironmentToServerTask(name string, opts ...func(*ServerTaskQuery)) *EnvironmentQuery { + query := (&ServerTaskClient{config: eq.config}).Query() + for _, opt := range opts { + opt(query) + } + if eq.withNamedEnvironmentToServerTask == nil { + eq.withNamedEnvironmentToServerTask = make(map[string]*ServerTaskQuery) + } + eq.withNamedEnvironmentToServerTask[name] = query + return eq } -func (es *EnvironmentSelect) sqlScan(ctx context.Context, v interface{}) error { +// EnvironmentGroupBy is the group-by builder for Environment entities. +type EnvironmentGroupBy struct { + selector + build *EnvironmentQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (egb *EnvironmentGroupBy) Aggregate(fns ...AggregateFunc) *EnvironmentGroupBy { + egb.fns = append(egb.fns, fns...) + return egb +} + +// Scan applies the selector query and scans the result into the given value. +func (egb *EnvironmentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, egb.build.ctx, "GroupBy") + if err := egb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*EnvironmentQuery, *EnvironmentGroupBy](ctx, egb.build, egb, egb.build.inters, v) +} + +func (egb *EnvironmentGroupBy) sqlScan(ctx context.Context, root *EnvironmentQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(egb.fns)) + for _, fn := range egb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*egb.flds)+len(egb.fns)) + for _, f := range *egb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*egb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := egb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// EnvironmentSelect is the builder for selecting fields of Environment entities. +type EnvironmentSelect struct { + *EnvironmentQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (es *EnvironmentSelect) Aggregate(fns ...AggregateFunc) *EnvironmentSelect { + es.fns = append(es.fns, fns...) + return es +} + +// Scan applies the selector query and scans the result into the given value. 
+func (es *EnvironmentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, es.ctx, "Select") + if err := es.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*EnvironmentQuery, *EnvironmentSelect](ctx, es.EnvironmentQuery, es, es.inters, v) +} + +func (es *EnvironmentSelect) sqlScan(ctx context.Context, root *EnvironmentQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(es.fns)) + for _, fn := range es.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*es.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := es.sql.Query() + query, args := selector.Query() if err := es.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/environment_update.go b/ent/environment_update.go index 3b817e98..349942c1 100755 --- a/ent/environment_update.go +++ b/ent/environment_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/ansible" "github.com/gen0cide/laforge/ent/build" @@ -47,9 +48,17 @@ func (eu *EnvironmentUpdate) Where(ps ...predicate.Environment) *EnvironmentUpda return eu } -// SetHclID sets the "hcl_id" field. -func (eu *EnvironmentUpdate) SetHclID(s string) *EnvironmentUpdate { - eu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (eu *EnvironmentUpdate) SetHCLID(s string) *EnvironmentUpdate { + eu.mutation.SetHCLID(s) + return eu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableHCLID(s *string) *EnvironmentUpdate { + if s != nil { + eu.SetHCLID(*s) + } return eu } @@ -59,24 +68,56 @@ func (eu *EnvironmentUpdate) SetCompetitionID(s string) *EnvironmentUpdate { return eu } +// SetNillableCompetitionID sets the "competition_id" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableCompetitionID(s *string) *EnvironmentUpdate { + if s != nil { + eu.SetCompetitionID(*s) + } + return eu +} + // SetName sets the "name" field. func (eu *EnvironmentUpdate) SetName(s string) *EnvironmentUpdate { eu.mutation.SetName(s) return eu } +// SetNillableName sets the "name" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableName(s *string) *EnvironmentUpdate { + if s != nil { + eu.SetName(*s) + } + return eu +} + // SetDescription sets the "description" field. func (eu *EnvironmentUpdate) SetDescription(s string) *EnvironmentUpdate { eu.mutation.SetDescription(s) return eu } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableDescription(s *string) *EnvironmentUpdate { + if s != nil { + eu.SetDescription(*s) + } + return eu +} + // SetBuilder sets the "builder" field. func (eu *EnvironmentUpdate) SetBuilder(s string) *EnvironmentUpdate { eu.mutation.SetBuilder(s) return eu } +// SetNillableBuilder sets the "builder" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableBuilder(s *string) *EnvironmentUpdate { + if s != nil { + eu.SetBuilder(*s) + } + return eu +} + // SetTeamCount sets the "team_count" field. 
func (eu *EnvironmentUpdate) SetTeamCount(i int) *EnvironmentUpdate { eu.mutation.ResetTeamCount() @@ -84,6 +125,14 @@ func (eu *EnvironmentUpdate) SetTeamCount(i int) *EnvironmentUpdate { return eu } +// SetNillableTeamCount sets the "team_count" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableTeamCount(i *int) *EnvironmentUpdate { + if i != nil { + eu.SetTeamCount(*i) + } + return eu +} + // AddTeamCount adds i to the "team_count" field. func (eu *EnvironmentUpdate) AddTeamCount(i int) *EnvironmentUpdate { eu.mutation.AddTeamCount(i) @@ -97,6 +146,14 @@ func (eu *EnvironmentUpdate) SetRevision(i int) *EnvironmentUpdate { return eu } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (eu *EnvironmentUpdate) SetNillableRevision(i *int) *EnvironmentUpdate { + if i != nil { + eu.SetRevision(*i) + } + return eu +} + // AddRevision adds i to the "revision" field. func (eu *EnvironmentUpdate) AddRevision(i int) *EnvironmentUpdate { eu.mutation.AddRevision(i) @@ -109,12 +166,24 @@ func (eu *EnvironmentUpdate) SetAdminCidrs(s []string) *EnvironmentUpdate { return eu } +// AppendAdminCidrs appends s to the "admin_cidrs" field. +func (eu *EnvironmentUpdate) AppendAdminCidrs(s []string) *EnvironmentUpdate { + eu.mutation.AppendAdminCidrs(s) + return eu +} + // SetExposedVdiPorts sets the "exposed_vdi_ports" field. func (eu *EnvironmentUpdate) SetExposedVdiPorts(s []string) *EnvironmentUpdate { eu.mutation.SetExposedVdiPorts(s) return eu } +// AppendExposedVdiPorts appends s to the "exposed_vdi_ports" field. +func (eu *EnvironmentUpdate) AppendExposedVdiPorts(s []string) *EnvironmentUpdate { + eu.mutation.AppendExposedVdiPorts(s) + return eu +} + // SetConfig sets the "config" field. func (eu *EnvironmentUpdate) SetConfig(m map[string]string) *EnvironmentUpdate { eu.mutation.SetConfig(m) @@ -818,34 +887,7 @@ func (eu *EnvironmentUpdate) RemoveEnvironmentToServerTask(s ...*ServerTask) *En // Save executes the query and returns the number of nodes affected by the update operation. func (eu *EnvironmentUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(eu.hooks) == 0 { - affected, err = eu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EnvironmentMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - eu.mutation = mutation - affected, err = eu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(eu.hooks) - 1; i >= 0; i-- { - if eu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = eu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, eu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, eu.sqlSave, eu.mutation, eu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -871,16 +913,7 @@ func (eu *EnvironmentUpdate) ExecX(ctx context.Context) { } func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: environment.Table, - Columns: environment.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(environment.Table, environment.Columns, sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID)) if ps := eu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -888,96 +921,54 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := eu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldHclID, - }) + if value, ok := eu.mutation.HCLID(); ok { + _spec.SetField(environment.FieldHCLID, field.TypeString, value) } if value, ok := eu.mutation.CompetitionID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldCompetitionID, - }) + _spec.SetField(environment.FieldCompetitionID, field.TypeString, value) } if value, ok := eu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldName, - }) + _spec.SetField(environment.FieldName, field.TypeString, value) } if value, ok := eu.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldDescription, - }) + _spec.SetField(environment.FieldDescription, field.TypeString, value) } if value, ok := eu.mutation.Builder(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldBuilder, - }) + _spec.SetField(environment.FieldBuilder, field.TypeString, value) } if value, ok := eu.mutation.TeamCount(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldTeamCount, - }) + _spec.SetField(environment.FieldTeamCount, field.TypeInt, value) } if value, ok := eu.mutation.AddedTeamCount(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldTeamCount, - }) + _spec.AddField(environment.FieldTeamCount, field.TypeInt, value) } if value, ok := eu.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldRevision, - }) + _spec.SetField(environment.FieldRevision, field.TypeInt, value) } if value, ok := eu.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldRevision, - }) + _spec.AddField(environment.FieldRevision, field.TypeInt, value) } if value, ok := eu.mutation.AdminCidrs(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldAdminCidrs, + _spec.SetField(environment.FieldAdminCidrs, field.TypeJSON, value) + } + if value, ok := eu.mutation.AppendedAdminCidrs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + 
sqljson.Append(u, environment.FieldAdminCidrs, value) }) } if value, ok := eu.mutation.ExposedVdiPorts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldExposedVdiPorts, + _spec.SetField(environment.FieldExposedVdiPorts, field.TypeJSON, value) + } + if value, ok := eu.mutation.AppendedExposedVdiPorts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, environment.FieldExposedVdiPorts, value) }) } if value, ok := eu.mutation.Config(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldConfig, - }) + _spec.SetField(environment.FieldConfig, field.TypeJSON, value) } if value, ok := eu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldTags, - }) + _spec.SetField(environment.FieldTags, field.TypeJSON, value) } if eu.mutation.EnvironmentToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -987,10 +978,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1003,10 +991,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1022,10 +1007,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1041,10 +1023,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1057,10 +1036,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1076,10 +1052,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1095,10 +1068,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: 
[]string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1111,10 +1081,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1130,10 +1097,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1149,10 +1113,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1165,10 +1126,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1184,10 +1142,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1203,10 +1158,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1219,10 +1171,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1238,10 +1187,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1257,10 +1203,7 @@ func (eu 
*EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1273,10 +1216,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1292,10 +1232,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1311,10 +1248,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1327,10 +1261,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1346,10 +1277,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1365,10 +1293,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1381,10 +1306,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1400,10 +1322,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, 
field.TypeUUID), }, } for _, k := range nodes { @@ -1419,10 +1338,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1435,10 +1351,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1454,10 +1367,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1473,10 +1383,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1489,10 +1396,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1508,10 +1412,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1527,10 +1428,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1543,10 +1441,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1562,10 +1457,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ 
- Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1581,10 +1473,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1597,10 +1486,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1616,10 +1502,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1635,10 +1518,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1651,10 +1531,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1670,10 +1547,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1689,10 +1563,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1705,10 +1576,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1724,10 +1592,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1743,10 +1608,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1759,10 +1621,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1778,10 +1637,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1797,10 +1653,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1813,10 +1666,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1832,10 +1682,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1851,10 +1698,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1867,10 +1711,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1886,10 +1727,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1905,10 +1743,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1921,10 +1756,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1940,10 +1772,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1959,10 +1788,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1975,10 +1801,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1994,10 +1817,7 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2009,10 +1829,11 @@ func (eu *EnvironmentUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{environment.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + eu.mutation.done = true return n, nil } @@ -2024,9 +1845,17 @@ type EnvironmentUpdateOne struct { mutation *EnvironmentMutation } -// SetHclID sets the "hcl_id" field. -func (euo *EnvironmentUpdateOne) SetHclID(s string) *EnvironmentUpdateOne { - euo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (euo *EnvironmentUpdateOne) SetHCLID(s string) *EnvironmentUpdateOne { + euo.mutation.SetHCLID(s) + return euo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. 
+func (euo *EnvironmentUpdateOne) SetNillableHCLID(s *string) *EnvironmentUpdateOne { + if s != nil { + euo.SetHCLID(*s) + } return euo } @@ -2036,24 +1865,56 @@ func (euo *EnvironmentUpdateOne) SetCompetitionID(s string) *EnvironmentUpdateOn return euo } +// SetNillableCompetitionID sets the "competition_id" field if the given value is not nil. +func (euo *EnvironmentUpdateOne) SetNillableCompetitionID(s *string) *EnvironmentUpdateOne { + if s != nil { + euo.SetCompetitionID(*s) + } + return euo +} + // SetName sets the "name" field. func (euo *EnvironmentUpdateOne) SetName(s string) *EnvironmentUpdateOne { euo.mutation.SetName(s) return euo } +// SetNillableName sets the "name" field if the given value is not nil. +func (euo *EnvironmentUpdateOne) SetNillableName(s *string) *EnvironmentUpdateOne { + if s != nil { + euo.SetName(*s) + } + return euo +} + // SetDescription sets the "description" field. func (euo *EnvironmentUpdateOne) SetDescription(s string) *EnvironmentUpdateOne { euo.mutation.SetDescription(s) return euo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (euo *EnvironmentUpdateOne) SetNillableDescription(s *string) *EnvironmentUpdateOne { + if s != nil { + euo.SetDescription(*s) + } + return euo +} + // SetBuilder sets the "builder" field. func (euo *EnvironmentUpdateOne) SetBuilder(s string) *EnvironmentUpdateOne { euo.mutation.SetBuilder(s) return euo } +// SetNillableBuilder sets the "builder" field if the given value is not nil. +func (euo *EnvironmentUpdateOne) SetNillableBuilder(s *string) *EnvironmentUpdateOne { + if s != nil { + euo.SetBuilder(*s) + } + return euo +} + // SetTeamCount sets the "team_count" field. func (euo *EnvironmentUpdateOne) SetTeamCount(i int) *EnvironmentUpdateOne { euo.mutation.ResetTeamCount() @@ -2061,6 +1922,14 @@ func (euo *EnvironmentUpdateOne) SetTeamCount(i int) *EnvironmentUpdateOne { return euo } +// SetNillableTeamCount sets the "team_count" field if the given value is not nil. +func (euo *EnvironmentUpdateOne) SetNillableTeamCount(i *int) *EnvironmentUpdateOne { + if i != nil { + euo.SetTeamCount(*i) + } + return euo +} + // AddTeamCount adds i to the "team_count" field. func (euo *EnvironmentUpdateOne) AddTeamCount(i int) *EnvironmentUpdateOne { euo.mutation.AddTeamCount(i) @@ -2074,6 +1943,14 @@ func (euo *EnvironmentUpdateOne) SetRevision(i int) *EnvironmentUpdateOne { return euo } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (euo *EnvironmentUpdateOne) SetNillableRevision(i *int) *EnvironmentUpdateOne { + if i != nil { + euo.SetRevision(*i) + } + return euo +} + // AddRevision adds i to the "revision" field. func (euo *EnvironmentUpdateOne) AddRevision(i int) *EnvironmentUpdateOne { euo.mutation.AddRevision(i) @@ -2086,12 +1963,24 @@ func (euo *EnvironmentUpdateOne) SetAdminCidrs(s []string) *EnvironmentUpdateOne return euo } +// AppendAdminCidrs appends s to the "admin_cidrs" field. +func (euo *EnvironmentUpdateOne) AppendAdminCidrs(s []string) *EnvironmentUpdateOne { + euo.mutation.AppendAdminCidrs(s) + return euo +} + // SetExposedVdiPorts sets the "exposed_vdi_ports" field. func (euo *EnvironmentUpdateOne) SetExposedVdiPorts(s []string) *EnvironmentUpdateOne { euo.mutation.SetExposedVdiPorts(s) return euo } +// AppendExposedVdiPorts appends s to the "exposed_vdi_ports" field. 
+func (euo *EnvironmentUpdateOne) AppendExposedVdiPorts(s []string) *EnvironmentUpdateOne { + euo.mutation.AppendExposedVdiPorts(s) + return euo +} + // SetConfig sets the "config" field. func (euo *EnvironmentUpdateOne) SetConfig(m map[string]string) *EnvironmentUpdateOne { euo.mutation.SetConfig(m) @@ -2793,6 +2682,12 @@ func (euo *EnvironmentUpdateOne) RemoveEnvironmentToServerTask(s ...*ServerTask) return euo.RemoveEnvironmentToServerTaskIDs(ids...) } +// Where appends a list predicates to the EnvironmentUpdate builder. +func (euo *EnvironmentUpdateOne) Where(ps ...predicate.Environment) *EnvironmentUpdateOne { + euo.mutation.Where(ps...) + return euo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (euo *EnvironmentUpdateOne) Select(field string, fields ...string) *EnvironmentUpdateOne { @@ -2802,34 +2697,7 @@ func (euo *EnvironmentUpdateOne) Select(field string, fields ...string) *Environ // Save executes the query and returns the updated Environment entity. func (euo *EnvironmentUpdateOne) Save(ctx context.Context) (*Environment, error) { - var ( - err error - node *Environment - ) - if len(euo.hooks) == 0 { - node, err = euo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EnvironmentMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - euo.mutation = mutation - node, err = euo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(euo.hooks) - 1; i >= 0; i-- { - if euo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = euo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, euo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, euo.sqlSave, euo.mutation, euo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -2855,16 +2723,7 @@ func (euo *EnvironmentUpdateOne) ExecX(ctx context.Context) { } func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environment, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: environment.Table, - Columns: environment.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(environment.Table, environment.Columns, sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID)) id, ok := euo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Environment.id" for update`)} @@ -2889,96 +2748,54 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen } } } - if value, ok := euo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldHclID, - }) + if value, ok := euo.mutation.HCLID(); ok { + _spec.SetField(environment.FieldHCLID, field.TypeString, value) } if value, ok := euo.mutation.CompetitionID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldCompetitionID, - }) + _spec.SetField(environment.FieldCompetitionID, field.TypeString, value) } if value, ok := euo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldName, - }) + _spec.SetField(environment.FieldName, field.TypeString, value) } if value, ok := euo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldDescription, - }) + _spec.SetField(environment.FieldDescription, field.TypeString, value) } if value, ok := euo.mutation.Builder(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: environment.FieldBuilder, - }) + _spec.SetField(environment.FieldBuilder, field.TypeString, value) } if value, ok := euo.mutation.TeamCount(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldTeamCount, - }) + _spec.SetField(environment.FieldTeamCount, field.TypeInt, value) } if value, ok := euo.mutation.AddedTeamCount(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldTeamCount, - }) + _spec.AddField(environment.FieldTeamCount, field.TypeInt, value) } if value, ok := euo.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldRevision, - }) + _spec.SetField(environment.FieldRevision, field.TypeInt, value) } if value, ok := euo.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: environment.FieldRevision, - }) + _spec.AddField(environment.FieldRevision, field.TypeInt, value) } if value, ok := euo.mutation.AdminCidrs(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldAdminCidrs, + _spec.SetField(environment.FieldAdminCidrs, field.TypeJSON, value) + } + if value, ok := euo.mutation.AppendedAdminCidrs(); 
ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, environment.FieldAdminCidrs, value) }) } if value, ok := euo.mutation.ExposedVdiPorts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldExposedVdiPorts, + _spec.SetField(environment.FieldExposedVdiPorts, field.TypeJSON, value) + } + if value, ok := euo.mutation.AppendedExposedVdiPorts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, environment.FieldExposedVdiPorts, value) }) } if value, ok := euo.mutation.Config(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldConfig, - }) + _spec.SetField(environment.FieldConfig, field.TypeJSON, value) } if value, ok := euo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: environment.FieldTags, - }) + _spec.SetField(environment.FieldTags, field.TypeJSON, value) } if euo.mutation.EnvironmentToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -2988,10 +2805,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3004,10 +2818,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3023,10 +2834,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToUserPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3042,10 +2850,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3058,10 +2863,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3077,10 +2879,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3096,10 +2895,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx 
context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3112,10 +2908,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3131,10 +2924,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToCompetitionColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: competition.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(competition.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3150,10 +2940,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3166,10 +2953,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3185,10 +2969,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToIdentityColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3204,10 +2985,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3220,10 +2998,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3239,10 +3014,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := 
range nodes { @@ -3258,10 +3030,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3274,10 +3043,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3293,10 +3059,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3312,10 +3075,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3328,10 +3088,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3347,10 +3104,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3366,10 +3120,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3382,10 +3133,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3401,10 +3149,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: 
filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3420,10 +3165,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3436,10 +3178,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3455,10 +3194,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3474,10 +3210,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3490,10 +3223,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3509,10 +3239,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3528,10 +3255,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3544,10 +3268,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3563,10 +3284,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: 
[]string{environment.EnvironmentToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3582,10 +3300,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3598,10 +3313,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3617,10 +3329,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3636,10 +3345,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3652,10 +3358,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3671,10 +3374,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToDNSPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dns.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dns.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3690,10 +3390,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3706,10 +3403,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3725,10 +3419,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: 
[]string{environment.EnvironmentToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3744,10 +3435,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3760,10 +3448,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3779,10 +3464,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3798,10 +3480,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3814,10 +3493,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3833,10 +3509,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3852,10 +3525,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3868,10 +3538,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3887,10 +3554,7 @@ func 
(euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3906,10 +3570,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3922,10 +3583,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3941,10 +3599,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: environment.EnvironmentToRepositoryPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3960,10 +3615,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -3976,10 +3628,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -3995,10 +3644,7 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen Columns: []string{environment.EnvironmentToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -4013,9 +3659,10 @@ func (euo *EnvironmentUpdateOne) sqlSave(ctx context.Context) (_node *Environmen if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{environment.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + euo.mutation.done = true return _node, nil } diff --git a/ent/filedelete.go b/ent/filedelete.go index 93a5fe07..ec4ab1e7 100755 --- a/ent/filedelete.go +++ b/ent/filedelete.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/filedelete" @@ -18,8 +19,8 @@ type FileDelete struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Path holds the value of the "path" field. Path string `json:"path,omitempty" hcl:"path,attr"` // Tags holds the value of the "tags" field. @@ -28,11 +29,13 @@ type FileDelete struct { // The values are being populated by the FileDeleteQuery when eager-loading is set. Edges FileDeleteEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // FileDeleteToEnvironment holds the value of the FileDeleteToEnvironment edge. HCLFileDeleteToEnvironment *Environment `json:"FileDeleteToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_file_delete *uuid.UUID + selectValues sql.SelectValues } // FileDeleteEdges holds the relations/edges for other nodes in the graph. @@ -42,6 +45,8 @@ type FileDeleteEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. + totalCount [1]map[string]int } // FileDeleteToEnvironmentOrErr returns the FileDeleteToEnvironment value or an error if the edge @@ -49,8 +54,7 @@ type FileDeleteEdges struct { func (e FileDeleteEdges) FileDeleteToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[0] { if e.FileDeleteToEnvironment == nil { - // The edge FileDeleteToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.FileDeleteToEnvironment, nil @@ -59,20 +63,20 @@ func (e FileDeleteEdges) FileDeleteToEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*FileDelete) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*FileDelete) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case filedelete.FieldTags: values[i] = new([]byte) - case filedelete.FieldHclID, filedelete.FieldPath: + case filedelete.FieldHCLID, filedelete.FieldPath: values[i] = new(sql.NullString) case filedelete.FieldID: values[i] = new(uuid.UUID) case filedelete.ForeignKeys[0]: // environment_environment_to_file_delete values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type FileDelete", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -80,7 +84,7 @@ func (*FileDelete) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the FileDelete fields. 
-func (fd *FileDelete) assignValues(columns []string, values []interface{}) error { +func (fd *FileDelete) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -92,11 +96,11 @@ func (fd *FileDelete) assignValues(columns []string, values []interface{}) error } else if value != nil { fd.ID = *value } - case filedelete.FieldHclID: + case filedelete.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - fd.HclID = value.String + fd.HCLID = value.String } case filedelete.FieldPath: if value, ok := values[i].(*sql.NullString); !ok { @@ -119,31 +123,39 @@ func (fd *FileDelete) assignValues(columns []string, values []interface{}) error fd.environment_environment_to_file_delete = new(uuid.UUID) *fd.environment_environment_to_file_delete = *value.S.(*uuid.UUID) } + default: + fd.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the FileDelete. +// This includes values selected through modifiers, order, etc. +func (fd *FileDelete) Value(name string) (ent.Value, error) { + return fd.selectValues.Get(name) +} + // QueryFileDeleteToEnvironment queries the "FileDeleteToEnvironment" edge of the FileDelete entity. func (fd *FileDelete) QueryFileDeleteToEnvironment() *EnvironmentQuery { - return (&FileDeleteClient{config: fd.config}).QueryFileDeleteToEnvironment(fd) + return NewFileDeleteClient(fd.config).QueryFileDeleteToEnvironment(fd) } // Update returns a builder for updating this FileDelete. // Note that you need to call FileDelete.Unwrap() before calling this method if this FileDelete // was returned from a transaction, and the transaction was committed or rolled back. func (fd *FileDelete) Update() *FileDeleteUpdateOne { - return (&FileDeleteClient{config: fd.config}).UpdateOne(fd) + return NewFileDeleteClient(fd.config).UpdateOne(fd) } // Unwrap unwraps the FileDelete entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (fd *FileDelete) Unwrap() *FileDelete { - tx, ok := fd.config.driver.(*txDriver) + _tx, ok := fd.config.driver.(*txDriver) if !ok { panic("ent: FileDelete is not a transactional entity") } - fd.config.driver = tx.drv + fd.config.driver = _tx.drv return fd } @@ -151,12 +163,14 @@ func (fd *FileDelete) Unwrap() *FileDelete { func (fd *FileDelete) String() string { var builder strings.Builder builder.WriteString("FileDelete(") - builder.WriteString(fmt.Sprintf("id=%v", fd.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(fd.HclID) - builder.WriteString(", path=") + builder.WriteString(fmt.Sprintf("id=%v, ", fd.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(fd.HCLID) + builder.WriteString(", ") + builder.WriteString("path=") builder.WriteString(fd.Path) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", fd.Tags)) builder.WriteByte(')') return builder.String() @@ -164,9 +178,3 @@ func (fd *FileDelete) String() string { // FileDeletes is a parsable slice of FileDelete. 
type FileDeletes []*FileDelete - -func (fd FileDeletes) config(cfg config) { - for _i := range fd { - fd[_i].config = cfg - } -} diff --git a/ent/filedelete/filedelete.go b/ent/filedelete/filedelete.go index 301d7005..9c69bdef 100755 --- a/ent/filedelete/filedelete.go +++ b/ent/filedelete/filedelete.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package filedelete import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "file_delete" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldPath holds the string denoting the path field in the database. FieldPath = "path" // FieldTags holds the string denoting the tags field in the database. @@ -33,7 +35,7 @@ const ( // Columns holds all SQL columns for filedelete fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldPath, FieldTags, } @@ -63,3 +65,35 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the FileDelete queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByPath orders the results by the path field. +func ByPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPath, opts...).ToFunc() +} + +// ByFileDeleteToEnvironmentField orders the results by FileDeleteToEnvironment field. +func ByFileDeleteToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFileDeleteToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newFileDeleteToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FileDeleteToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, FileDeleteToEnvironmentTable, FileDeleteToEnvironmentColumn), + ) +} diff --git a/ent/filedelete/where.go b/ent/filedelete/where.go index 7ea85d53..5853395f 100755 --- a/ent/filedelete/where.go +++ b/ent/filedelete/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package filedelete @@ -11,321 +11,187 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
func IDNEQ(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.FileDelete(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.FileDelete(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.FileDelete(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldEQ(FieldHCLID, v)) } // Path applies equality check predicate on the "path" field. It's identical to PathEQ. func Path(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldEQ(FieldPath, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. 
-func HclIDNEQ(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.FileDelete { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDelete(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.FileDelete { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDelete(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. 
+func HCLIDContains(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.FileDelete { + return predicate.FileDelete(sql.FieldContainsFold(FieldHCLID, v)) } // PathEQ applies the EQ predicate on the "path" field. func PathEQ(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldEQ(FieldPath, v)) } // PathNEQ applies the NEQ predicate on the "path" field. func PathNEQ(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldNEQ(FieldPath, v)) } // PathIn applies the In predicate on the "path" field. func PathIn(vs ...string) predicate.FileDelete { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDelete(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPath), v...)) - }) + return predicate.FileDelete(sql.FieldIn(FieldPath, vs...)) } // PathNotIn applies the NotIn predicate on the "path" field. func PathNotIn(vs ...string) predicate.FileDelete { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDelete(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPath), v...)) - }) + return predicate.FileDelete(sql.FieldNotIn(FieldPath, vs...)) } // PathGT applies the GT predicate on the "path" field. 
func PathGT(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldGT(FieldPath, v)) } // PathGTE applies the GTE predicate on the "path" field. func PathGTE(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldGTE(FieldPath, v)) } // PathLT applies the LT predicate on the "path" field. func PathLT(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldLT(FieldPath, v)) } // PathLTE applies the LTE predicate on the "path" field. func PathLTE(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldLTE(FieldPath, v)) } // PathContains applies the Contains predicate on the "path" field. func PathContains(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldContains(FieldPath, v)) } // PathHasPrefix applies the HasPrefix predicate on the "path" field. func PathHasPrefix(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldHasPrefix(FieldPath, v)) } // PathHasSuffix applies the HasSuffix predicate on the "path" field. func PathHasSuffix(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldHasSuffix(FieldPath, v)) } // PathEqualFold applies the EqualFold predicate on the "path" field. func PathEqualFold(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldEqualFold(FieldPath, v)) } // PathContainsFold applies the ContainsFold predicate on the "path" field. func PathContainsFold(v string) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPath), v)) - }) + return predicate.FileDelete(sql.FieldContainsFold(FieldPath, v)) } // HasFileDeleteToEnvironment applies the HasEdge predicate on the "FileDeleteToEnvironment" edge. @@ -333,7 +199,6 @@ func HasFileDeleteToEnvironment() predicate.FileDelete { return predicate.FileDelete(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FileDeleteToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, FileDeleteToEnvironmentTable, FileDeleteToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -343,11 +208,7 @@ func HasFileDeleteToEnvironment() predicate.FileDelete { // HasFileDeleteToEnvironmentWith applies the HasEdge predicate on the "FileDeleteToEnvironment" edge with a given conditions (other predicates). 
func HasFileDeleteToEnvironmentWith(preds ...predicate.Environment) predicate.FileDelete { return predicate.FileDelete(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FileDeleteToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, FileDeleteToEnvironmentTable, FileDeleteToEnvironmentColumn), - ) + step := newFileDeleteToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -358,32 +219,15 @@ func HasFileDeleteToEnvironmentWith(preds ...predicate.Environment) predicate.Fi // And groups predicates with the AND operator between them. func And(predicates ...predicate.FileDelete) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.FileDelete(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.FileDelete) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.FileDelete(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.FileDelete) predicate.FileDelete { - return predicate.FileDelete(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.FileDelete(sql.NotPredicates(p)) } diff --git a/ent/filedelete_create.go b/ent/filedelete_create.go index f2785161..09a877c8 100755 --- a/ent/filedelete_create.go +++ b/ent/filedelete_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -21,9 +21,9 @@ type FileDeleteCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (fdc *FileDeleteCreate) SetHclID(s string) *FileDeleteCreate { - fdc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (fdc *FileDeleteCreate) SetHCLID(s string) *FileDeleteCreate { + fdc.mutation.SetHCLID(s) return fdc } @@ -79,44 +79,8 @@ func (fdc *FileDeleteCreate) Mutation() *FileDeleteMutation { // Save creates the FileDelete in the database. func (fdc *FileDeleteCreate) Save(ctx context.Context) (*FileDelete, error) { - var ( - err error - node *FileDelete - ) fdc.defaults() - if len(fdc.hooks) == 0 { - if err = fdc.check(); err != nil { - return nil, err - } - node, err = fdc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDeleteMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = fdc.check(); err != nil { - return nil, err - } - fdc.mutation = mutation - if node, err = fdc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(fdc.hooks) - 1; i >= 0; i-- { - if fdc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fdc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fdc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fdc.sqlSave, fdc.mutation, fdc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -151,7 +115,7 @@ func (fdc *FileDeleteCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (fdc *FileDeleteCreate) check() error { - if _, ok := fdc.mutation.HclID(); !ok { + if _, ok := fdc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "FileDelete.hcl_id"`)} } if _, ok := fdc.mutation.Path(); !ok { @@ -164,10 +128,13 @@ func (fdc *FileDeleteCreate) check() error { } func (fdc *FileDeleteCreate) sqlSave(ctx context.Context) (*FileDelete, error) { + if err := fdc.check(); err != nil { + return nil, err + } _node, _spec := fdc.createSpec() if err := sqlgraph.CreateNode(ctx, fdc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -178,46 +145,30 @@ func (fdc *FileDeleteCreate) sqlSave(ctx context.Context) (*FileDelete, error) { return nil, err } } + fdc.mutation.id = &_node.ID + fdc.mutation.done = true return _node, nil } func (fdc *FileDeleteCreate) createSpec() (*FileDelete, *sqlgraph.CreateSpec) { var ( _node = &FileDelete{config: fdc.config} - _spec = &sqlgraph.CreateSpec{ - Table: filedelete.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(filedelete.Table, sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID)) ) if id, ok := fdc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := fdc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedelete.FieldHclID, - }) - _node.HclID = value + if value, ok := fdc.mutation.HCLID(); ok { + _spec.SetField(filedelete.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := fdc.mutation.Path(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedelete.FieldPath, - }) + _spec.SetField(filedelete.FieldPath, field.TypeString, value) _node.Path = value } if value, ok := fdc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: filedelete.FieldTags, - }) + _spec.SetField(filedelete.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := fdc.mutation.FileDeleteToEnvironmentIDs(); len(nodes) > 0 { @@ -228,10 +179,7 @@ func (fdc *FileDeleteCreate) createSpec() (*FileDelete, *sqlgraph.CreateSpec) { Columns: []string{filedelete.FileDeleteToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -246,11 +194,15 @@ func (fdc *FileDeleteCreate) createSpec() (*FileDelete, *sqlgraph.CreateSpec) { // FileDeleteCreateBulk is the builder for creating many FileDelete entities in bulk. type FileDeleteCreateBulk struct { config + err error builders []*FileDeleteCreate } // Save creates the FileDelete entities in the database. 
func (fdcb *FileDeleteCreateBulk) Save(ctx context.Context) ([]*FileDelete, error) { + if fdcb.err != nil { + return nil, fdcb.err + } specs := make([]*sqlgraph.CreateSpec, len(fdcb.builders)) nodes := make([]*FileDelete, len(fdcb.builders)) mutators := make([]Mutator, len(fdcb.builders)) @@ -267,8 +219,8 @@ func (fdcb *FileDeleteCreateBulk) Save(ctx context.Context) ([]*FileDelete, erro return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, fdcb.builders[i+1].mutation) } else { @@ -276,7 +228,7 @@ func (fdcb *FileDeleteCreateBulk) Save(ctx context.Context) ([]*FileDelete, erro // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, fdcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/filedelete_delete.go b/ent/filedelete_delete.go index 42bc53b3..2e3397d5 100755 --- a/ent/filedelete_delete.go +++ b/ent/filedelete_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (fdd *FileDeleteDelete) Where(ps ...predicate.FileDelete) *FileDeleteDelete // Exec executes the deletion query and returns how many vertices were deleted. func (fdd *FileDeleteDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fdd.hooks) == 0 { - affected, err = fdd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDeleteMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fdd.mutation = mutation - affected, err = fdd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(fdd.hooks) - 1; i >= 0; i-- { - if fdd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fdd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fdd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fdd.sqlExec, fdd.mutation, fdd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (fdd *FileDeleteDelete) ExecX(ctx context.Context) int { } func (fdd *FileDeleteDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedelete.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(filedelete.Table, sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID)) if ps := fdd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (fdd *FileDeleteDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, fdd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, fdd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + fdd.mutation.done = true + return affected, err } // FileDeleteDeleteOne is the builder for deleting a single FileDelete entity. 
@@ -92,6 +61,12 @@ type FileDeleteDeleteOne struct { fdd *FileDeleteDelete } +// Where appends a list predicates to the FileDeleteDelete builder. +func (fddo *FileDeleteDeleteOne) Where(ps ...predicate.FileDelete) *FileDeleteDeleteOne { + fddo.fdd.mutation.Where(ps...) + return fddo +} + // Exec executes the deletion query. func (fddo *FileDeleteDeleteOne) Exec(ctx context.Context) error { n, err := fddo.fdd.Exec(ctx) @@ -107,5 +82,7 @@ func (fddo *FileDeleteDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (fddo *FileDeleteDeleteOne) ExecX(ctx context.Context) { - fddo.fdd.ExecX(ctx) + if err := fddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/filedelete_query.go b/ent/filedelete_query.go index 4b67827c..55d4d062 100755 --- a/ent/filedelete_query.go +++ b/ent/filedelete_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // FileDeleteQuery is the builder for querying FileDelete entities. type FileDeleteQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.FileDelete - // eager-loading edges. + ctx *QueryContext + order []filedelete.OrderOption + inters []Interceptor + predicates []predicate.FileDelete withFileDeleteToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*FileDelete) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (fdq *FileDeleteQuery) Where(ps ...predicate.FileDelete) *FileDeleteQuery { return fdq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (fdq *FileDeleteQuery) Limit(limit int) *FileDeleteQuery { - fdq.limit = &limit + fdq.ctx.Limit = &limit return fdq } -// Offset adds an offset step to the query. +// Offset to start from. func (fdq *FileDeleteQuery) Offset(offset int) *FileDeleteQuery { - fdq.offset = &offset + fdq.ctx.Offset = &offset return fdq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (fdq *FileDeleteQuery) Unique(unique bool) *FileDeleteQuery { - fdq.unique = &unique + fdq.ctx.Unique = &unique return fdq } -// Order adds an order step to the query. -func (fdq *FileDeleteQuery) Order(o ...OrderFunc) *FileDeleteQuery { +// Order specifies how the records should be ordered. +func (fdq *FileDeleteQuery) Order(o ...filedelete.OrderOption) *FileDeleteQuery { fdq.order = append(fdq.order, o...) return fdq } // QueryFileDeleteToEnvironment chains the current query on the "FileDeleteToEnvironment" edge. func (fdq *FileDeleteQuery) QueryFileDeleteToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: fdq.config} + query := (&EnvironmentClient{config: fdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := fdq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (fdq *FileDeleteQuery) QueryFileDeleteToEnvironment() *EnvironmentQuery { // First returns the first FileDelete entity from the query. // Returns a *NotFoundError when no FileDelete was found. 
func (fdq *FileDeleteQuery) First(ctx context.Context) (*FileDelete, error) { - nodes, err := fdq.Limit(1).All(ctx) + nodes, err := fdq.Limit(1).All(setContextOp(ctx, fdq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (fdq *FileDeleteQuery) FirstX(ctx context.Context) *FileDelete { // Returns a *NotFoundError when no FileDelete ID was found. func (fdq *FileDeleteQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = fdq.Limit(1).IDs(ctx); err != nil { + if ids, err = fdq.Limit(1).IDs(setContextOp(ctx, fdq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (fdq *FileDeleteQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one FileDelete entity is found. // Returns a *NotFoundError when no FileDelete entities are found. func (fdq *FileDeleteQuery) Only(ctx context.Context) (*FileDelete, error) { - nodes, err := fdq.Limit(2).All(ctx) + nodes, err := fdq.Limit(2).All(setContextOp(ctx, fdq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (fdq *FileDeleteQuery) OnlyX(ctx context.Context) *FileDelete { // Returns a *NotFoundError when no entities are found. func (fdq *FileDeleteQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = fdq.Limit(2).IDs(ctx); err != nil { + if ids, err = fdq.Limit(2).IDs(setContextOp(ctx, fdq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (fdq *FileDeleteQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of FileDeletes. func (fdq *FileDeleteQuery) All(ctx context.Context) ([]*FileDelete, error) { + ctx = setContextOp(ctx, fdq.ctx, "All") if err := fdq.prepareQuery(ctx); err != nil { return nil, err } - return fdq.sqlAll(ctx) + qr := querierAll[[]*FileDelete, *FileDeleteQuery]() + return withInterceptors[[]*FileDelete](ctx, fdq, qr, fdq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (fdq *FileDeleteQuery) AllX(ctx context.Context) []*FileDelete { } // IDs executes the query and returns a list of FileDelete IDs. -func (fdq *FileDeleteQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := fdq.Select(filedelete.FieldID).Scan(ctx, &ids); err != nil { +func (fdq *FileDeleteQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if fdq.ctx.Unique == nil && fdq.path != nil { + fdq.Unique(true) + } + ctx = setContextOp(ctx, fdq.ctx, "IDs") + if err = fdq.Select(filedelete.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (fdq *FileDeleteQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (fdq *FileDeleteQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, fdq.ctx, "Count") if err := fdq.prepareQuery(ctx); err != nil { return 0, err } - return fdq.sqlCount(ctx) + return withInterceptors[int](ctx, fdq, querierCount[*FileDeleteQuery](), fdq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (fdq *FileDeleteQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (fdq *FileDeleteQuery) Exist(ctx context.Context) (bool, error) { - if err := fdq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, fdq.ctx, "Exist") + switch _, err := fdq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return fdq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -264,22 +273,21 @@ func (fdq *FileDeleteQuery) Clone() *FileDeleteQuery { } return &FileDeleteQuery{ config: fdq.config, - limit: fdq.limit, - offset: fdq.offset, - order: append([]OrderFunc{}, fdq.order...), + ctx: fdq.ctx.Clone(), + order: append([]filedelete.OrderOption{}, fdq.order...), + inters: append([]Interceptor{}, fdq.inters...), predicates: append([]predicate.FileDelete{}, fdq.predicates...), withFileDeleteToEnvironment: fdq.withFileDeleteToEnvironment.Clone(), // clone intermediate query. - sql: fdq.sql.Clone(), - path: fdq.path, - unique: fdq.unique, + sql: fdq.sql.Clone(), + path: fdq.path, } } // WithFileDeleteToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "FileDeleteToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (fdq *FileDeleteQuery) WithFileDeleteToEnvironment(opts ...func(*EnvironmentQuery)) *FileDeleteQuery { - query := &EnvironmentQuery{config: fdq.config} + query := (&EnvironmentClient{config: fdq.config}).Query() for _, opt := range opts { opt(query) } @@ -293,25 +301,21 @@ func (fdq *FileDeleteQuery) WithFileDeleteToEnvironment(opts ...func(*Environmen // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.FileDelete.Query(). -// GroupBy(filedelete.FieldHclID). +// GroupBy(filedelete.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (fdq *FileDeleteQuery) GroupBy(field string, fields ...string) *FileDeleteGroupBy { - group := &FileDeleteGroupBy{config: fdq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := fdq.prepareQuery(ctx); err != nil { - return nil, err - } - return fdq.sqlQuery(ctx), nil - } - return group + fdq.ctx.Fields = append([]string{field}, fields...) + grbuild := &FileDeleteGroupBy{build: fdq} + grbuild.flds = &fdq.ctx.Fields + grbuild.label = filedelete.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -320,20 +324,37 @@ func (fdq *FileDeleteQuery) GroupBy(field string, fields ...string) *FileDeleteG // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.FileDelete.Query(). -// Select(filedelete.FieldHclID). +// Select(filedelete.FieldHCLID). // Scan(ctx, &v) -// func (fdq *FileDeleteQuery) Select(fields ...string) *FileDeleteSelect { - fdq.fields = append(fdq.fields, fields...) - return &FileDeleteSelect{FileDeleteQuery: fdq} + fdq.ctx.Fields = append(fdq.ctx.Fields, fields...) + sbuild := &FileDeleteSelect{FileDeleteQuery: fdq} + sbuild.label = filedelete.Label + sbuild.flds, sbuild.scan = &fdq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a FileDeleteSelect configured with the given aggregations. 
+func (fdq *FileDeleteQuery) Aggregate(fns ...AggregateFunc) *FileDeleteSelect { + return fdq.Select().Aggregate(fns...) } func (fdq *FileDeleteQuery) prepareQuery(ctx context.Context) error { - for _, f := range fdq.fields { + for _, inter := range fdq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, fdq); err != nil { + return err + } + } + } + for _, f := range fdq.ctx.Fields { if !filedelete.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (fdq *FileDeleteQuery) prepareQuery(ctx context.Context) error { return nil } -func (fdq *FileDeleteQuery) sqlAll(ctx context.Context) ([]*FileDelete, error) { +func (fdq *FileDeleteQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*FileDelete, error) { var ( nodes = []*FileDelete{} withFKs = fdq.withFKs @@ -363,92 +384,95 @@ func (fdq *FileDeleteQuery) sqlAll(ctx context.Context) ([]*FileDelete, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, filedelete.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*FileDelete).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &FileDelete{config: fdq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(fdq.modifiers) > 0 { + _spec.Modifiers = fdq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, fdq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := fdq.withFileDeleteToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*FileDelete) - for i := range nodes { - if nodes[i].environment_environment_to_file_delete == nil { - continue - } - fk := *nodes[i].environment_environment_to_file_delete - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := fdq.loadFileDeleteToEnvironment(ctx, query, nodes, nil, + func(n *FileDelete, e *Environment) { n.Edges.FileDeleteToEnvironment = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_delete" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.FileDeleteToEnvironment = n - } + } + for i := range fdq.loadTotal { + if err := fdq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (fdq *FileDeleteQuery) sqlCount(ctx context.Context) (int, error) { - _spec := fdq.querySpec() - _spec.Node.Columns = fdq.fields - if len(fdq.fields) > 0 { - _spec.Unique = fdq.unique != nil && *fdq.unique +func (fdq *FileDeleteQuery) loadFileDeleteToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*FileDelete, init func(*FileDelete), assign func(*FileDelete, *Environment)) error { + ids 
:= make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*FileDelete) + for i := range nodes { + if nodes[i].environment_environment_to_file_delete == nil { + continue + } + fk := *nodes[i].environment_environment_to_file_delete + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, fdq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_delete" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (fdq *FileDeleteQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := fdq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (fdq *FileDeleteQuery) sqlCount(ctx context.Context) (int, error) { + _spec := fdq.querySpec() + if len(fdq.modifiers) > 0 { + _spec.Modifiers = fdq.modifiers } - return n > 0, nil + _spec.Node.Columns = fdq.ctx.Fields + if len(fdq.ctx.Fields) > 0 { + _spec.Unique = fdq.ctx.Unique != nil && *fdq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, fdq.driver, _spec) } func (fdq *FileDeleteQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedelete.Table, - Columns: filedelete.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, - }, - From: fdq.sql, - Unique: true, - } - if unique := fdq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(filedelete.Table, filedelete.Columns, sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID)) + _spec.From = fdq.sql + if unique := fdq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if fdq.path != nil { + _spec.Unique = true } - if fields := fdq.fields; len(fields) > 0 { + if fields := fdq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, filedelete.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (fdq *FileDeleteQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := fdq.limit; limit != nil { + if limit := fdq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := fdq.offset; offset != nil { + if offset := fdq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := fdq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (fdq *FileDeleteQuery) querySpec() *sqlgraph.QuerySpec { func (fdq *FileDeleteQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(fdq.driver.Dialect()) t1 := builder.Table(filedelete.Table) - columns := fdq.fields + columns := fdq.ctx.Fields if len(columns) == 0 { columns = filedelete.Columns } @@ -492,7 +516,7 @@ func (fdq *FileDeleteQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = fdq.sql selector.Select(selector.Columns(columns...)...) } - if fdq.unique != nil && *fdq.unique { + if fdq.ctx.Unique != nil && *fdq.ctx.Unique { selector.Distinct() } for _, p := range fdq.predicates { @@ -501,12 +525,12 @@ func (fdq *FileDeleteQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range fdq.order { p(selector) } - if offset := fdq.offset; offset != nil { + if offset := fdq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. 
We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := fdq.limit; limit != nil { + if limit := fdq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (fdq *FileDeleteQuery) sqlQuery(ctx context.Context) *sql.Selector { // FileDeleteGroupBy is the group-by builder for FileDelete entities. type FileDeleteGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *FileDeleteQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (fdgb *FileDeleteGroupBy) Aggregate(fns ...AggregateFunc) *FileDeleteGroupB return fdgb } -// Scan applies the group-by query and scans the result into the given value. -func (fdgb *FileDeleteGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := fdgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (fdgb *FileDeleteGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fdgb.build.ctx, "GroupBy") + if err := fdgb.build.prepareQuery(ctx); err != nil { return err } - fdgb.sql = query - return fdgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := fdgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDeleteGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) StringsX(ctx context.Context) []string { - v, err := fdgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fdgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) StringX(ctx context.Context) string { - v, err := fdgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDeleteGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. 
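The Aggregate entry points added in this file (on the query builder and, further down, on the select builder) feed the same interceptor-aware Scan path that replaces the typed Strings/Ints/Float64s/Bools helpers being removed around this hunk. A small sketch of counting rows through Aggregate, assuming the usual generated client entry point (not shown in this excerpt):

// Usage sketch, not part of the diff; ent.Count() is the aggregation used in
// the GroupBy doc example above, and Scan is the generic path shown here.
func countFileDeletes(ctx context.Context, client *ent.Client) (int, error) {
	var v []struct {
		Count int `json:"count"`
	}
	if err := client.FileDelete.Query().
		Aggregate(ent.Count()).
		Scan(ctx, &v); err != nil {
		return 0, err
	}
	if len(v) == 0 {
		return 0, nil
	}
	return v[0].Count, nil
}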
-func (fdgb *FileDeleteGroupBy) IntsX(ctx context.Context) []int { - v, err := fdgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fdgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*FileDeleteQuery, *FileDeleteGroupBy](ctx, fdgb.build, fdgb, fdgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) IntX(ctx context.Context) int { - v, err := fdgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDeleteGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := fdgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fdgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) Float64X(ctx context.Context) float64 { - v, err := fdgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDeleteGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDeleteGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) BoolsX(ctx context.Context) []bool { - v, err := fdgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (fdgb *FileDeleteGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fdgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fdgb *FileDeleteGroupBy) BoolX(ctx context.Context) bool { - v, err := fdgb.Bool(ctx) - if err != nil { - panic(err) +func (fdgb *FileDeleteGroupBy) sqlScan(ctx context.Context, root *FileDeleteQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(fdgb.fns)) + for _, fn := range fdgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (fdgb *FileDeleteGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range fdgb.fields { - if !filedelete.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*fdgb.flds)+len(fdgb.fns)) + for _, f := range *fdgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := fdgb.sqlQuery() + selector.GroupBy(selector.Columns(*fdgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := fdgb.driver.Query(ctx, query, args, rows); err != nil { + if err := fdgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (fdgb *FileDeleteGroupBy) sqlQuery() *sql.Selector { - selector := fdgb.sql.Select() - aggregation := make([]string, 0, len(fdgb.fns)) - for _, fn := range fdgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(fdgb.fields)+len(fdgb.fns)) - for _, f := range fdgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(fdgb.fields...)...) -} - // FileDeleteSelect is the builder for selecting fields of FileDelete entities. type FileDeleteSelect struct { *FileDeleteQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (fds *FileDeleteSelect) Aggregate(fns ...AggregateFunc) *FileDeleteSelect { + fds.fns = append(fds.fns, fns...) + return fds } // Scan applies the selector query and scans the result into the given value. -func (fds *FileDeleteSelect) Scan(ctx context.Context, v interface{}) error { +func (fds *FileDeleteSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fds.ctx, "Select") if err := fds.prepareQuery(ctx); err != nil { return err } - fds.sql = fds.FileDeleteQuery.sqlQuery(ctx) - return fds.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fds *FileDeleteSelect) ScanX(ctx context.Context, v interface{}) { - if err := fds.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. 
It is only allowed when selecting one field. -func (fds *FileDeleteSelect) Strings(ctx context.Context) ([]string, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDeleteSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fds *FileDeleteSelect) StringsX(ctx context.Context) []string { - v, err := fds.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*FileDeleteQuery, *FileDeleteSelect](ctx, fds.FileDeleteQuery, fds, fds.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (fds *FileDeleteSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fds.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fds *FileDeleteSelect) StringX(ctx context.Context) string { - v, err := fds.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (fds *FileDeleteSelect) Ints(ctx context.Context) ([]int, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDeleteSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fds *FileDeleteSelect) IntsX(ctx context.Context) []int { - v, err := fds.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (fds *FileDeleteSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fds.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (fds *FileDeleteSelect) IntX(ctx context.Context) int { - v, err := fds.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (fds *FileDeleteSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDeleteSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fds *FileDeleteSelect) Float64sX(ctx context.Context) []float64 { - v, err := fds.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (fds *FileDeleteSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fds.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fds *FileDeleteSelect) Float64X(ctx context.Context) float64 { - v, err := fds.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (fds *FileDeleteSelect) Bools(ctx context.Context) ([]bool, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDeleteSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fds *FileDeleteSelect) BoolsX(ctx context.Context) []bool { - v, err := fds.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (fds *FileDeleteSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fds.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedelete.Label} - default: - err = fmt.Errorf("ent: FileDeleteSelect.Bools returned %d results when one was expected", len(v)) +func (fds *FileDeleteSelect) sqlScan(ctx context.Context, root *FileDeleteQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(fds.fns)) + for _, fn := range fds.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fds *FileDeleteSelect) BoolX(ctx context.Context) bool { - v, err := fds.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*fds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (fds *FileDeleteSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := fds.sql.Query() + query, args := selector.Query() if err := fds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/filedelete_update.go b/ent/filedelete_update.go index 3cc8537f..2f28a10d 100755 --- a/ent/filedelete_update.go +++ b/ent/filedelete_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -29,9 +29,17 @@ func (fdu *FileDeleteUpdate) Where(ps ...predicate.FileDelete) *FileDeleteUpdate return fdu } -// SetHclID sets the "hcl_id" field. -func (fdu *FileDeleteUpdate) SetHclID(s string) *FileDeleteUpdate { - fdu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (fdu *FileDeleteUpdate) SetHCLID(s string) *FileDeleteUpdate { + fdu.mutation.SetHCLID(s) + return fdu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. 
+func (fdu *FileDeleteUpdate) SetNillableHCLID(s *string) *FileDeleteUpdate { + if s != nil { + fdu.SetHCLID(*s) + } return fdu } @@ -41,6 +49,14 @@ func (fdu *FileDeleteUpdate) SetPath(s string) *FileDeleteUpdate { return fdu } +// SetNillablePath sets the "path" field if the given value is not nil. +func (fdu *FileDeleteUpdate) SetNillablePath(s *string) *FileDeleteUpdate { + if s != nil { + fdu.SetPath(*s) + } + return fdu +} + // SetTags sets the "tags" field. func (fdu *FileDeleteUpdate) SetTags(m map[string]string) *FileDeleteUpdate { fdu.mutation.SetTags(m) @@ -79,34 +95,7 @@ func (fdu *FileDeleteUpdate) ClearFileDeleteToEnvironment() *FileDeleteUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (fdu *FileDeleteUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fdu.hooks) == 0 { - affected, err = fdu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDeleteMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fdu.mutation = mutation - affected, err = fdu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(fdu.hooks) - 1; i >= 0; i-- { - if fdu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fdu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fdu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fdu.sqlSave, fdu.mutation, fdu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -132,16 +121,7 @@ func (fdu *FileDeleteUpdate) ExecX(ctx context.Context) { } func (fdu *FileDeleteUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedelete.Table, - Columns: filedelete.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(filedelete.Table, filedelete.Columns, sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID)) if ps := fdu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -149,26 +129,14 @@ func (fdu *FileDeleteUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := fdu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedelete.FieldHclID, - }) + if value, ok := fdu.mutation.HCLID(); ok { + _spec.SetField(filedelete.FieldHCLID, field.TypeString, value) } if value, ok := fdu.mutation.Path(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedelete.FieldPath, - }) + _spec.SetField(filedelete.FieldPath, field.TypeString, value) } if value, ok := fdu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: filedelete.FieldTags, - }) + _spec.SetField(filedelete.FieldTags, field.TypeJSON, value) } if fdu.mutation.FileDeleteToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -178,10 +146,7 @@ func (fdu *FileDeleteUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{filedelete.FileDeleteToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - 
Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -194,10 +159,7 @@ func (fdu *FileDeleteUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{filedelete.FileDeleteToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -209,10 +171,11 @@ func (fdu *FileDeleteUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{filedelete.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + fdu.mutation.done = true return n, nil } @@ -224,9 +187,17 @@ type FileDeleteUpdateOne struct { mutation *FileDeleteMutation } -// SetHclID sets the "hcl_id" field. -func (fduo *FileDeleteUpdateOne) SetHclID(s string) *FileDeleteUpdateOne { - fduo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (fduo *FileDeleteUpdateOne) SetHCLID(s string) *FileDeleteUpdateOne { + fduo.mutation.SetHCLID(s) + return fduo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (fduo *FileDeleteUpdateOne) SetNillableHCLID(s *string) *FileDeleteUpdateOne { + if s != nil { + fduo.SetHCLID(*s) + } return fduo } @@ -236,6 +207,14 @@ func (fduo *FileDeleteUpdateOne) SetPath(s string) *FileDeleteUpdateOne { return fduo } +// SetNillablePath sets the "path" field if the given value is not nil. +func (fduo *FileDeleteUpdateOne) SetNillablePath(s *string) *FileDeleteUpdateOne { + if s != nil { + fduo.SetPath(*s) + } + return fduo +} + // SetTags sets the "tags" field. func (fduo *FileDeleteUpdateOne) SetTags(m map[string]string) *FileDeleteUpdateOne { fduo.mutation.SetTags(m) @@ -272,6 +251,12 @@ func (fduo *FileDeleteUpdateOne) ClearFileDeleteToEnvironment() *FileDeleteUpdat return fduo } +// Where appends a list predicates to the FileDeleteUpdate builder. +func (fduo *FileDeleteUpdateOne) Where(ps ...predicate.FileDelete) *FileDeleteUpdateOne { + fduo.mutation.Where(ps...) + return fduo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (fduo *FileDeleteUpdateOne) Select(field string, fields ...string) *FileDeleteUpdateOne { @@ -281,34 +266,7 @@ func (fduo *FileDeleteUpdateOne) Select(field string, fields ...string) *FileDel // Save executes the query and returns the updated FileDelete entity. 
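The SetNillable* setters added above make optional-field updates terse: a nil pointer is simply skipped. A sketch of an update-one call, assuming the generated client exposes the usual UpdateOneID entry point (not shown in this excerpt):

// Usage sketch, not part of the diff. newHCLID and newPath may be nil; the
// SetNillable* helpers shown above only apply non-nil values.
// Assumes the same ent/filedelete imports as earlier sketches plus
// "github.com/google/uuid".
func patchFileDelete(ctx context.Context, client *ent.Client, id uuid.UUID, newHCLID, newPath *string) (*ent.FileDelete, error) {
	return client.FileDelete.
		UpdateOneID(id).
		SetNillableHCLID(newHCLID).
		SetNillablePath(newPath).
		Save(ctx)
}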
func (fduo *FileDeleteUpdateOne) Save(ctx context.Context) (*FileDelete, error) { - var ( - err error - node *FileDelete - ) - if len(fduo.hooks) == 0 { - node, err = fduo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDeleteMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fduo.mutation = mutation - node, err = fduo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(fduo.hooks) - 1; i >= 0; i-- { - if fduo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fduo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fduo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fduo.sqlSave, fduo.mutation, fduo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -334,16 +292,7 @@ func (fduo *FileDeleteUpdateOne) ExecX(ctx context.Context) { } func (fduo *FileDeleteUpdateOne) sqlSave(ctx context.Context) (_node *FileDelete, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedelete.Table, - Columns: filedelete.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(filedelete.Table, filedelete.Columns, sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID)) id, ok := fduo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "FileDelete.id" for update`)} @@ -368,26 +317,14 @@ func (fduo *FileDeleteUpdateOne) sqlSave(ctx context.Context) (_node *FileDelete } } } - if value, ok := fduo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedelete.FieldHclID, - }) + if value, ok := fduo.mutation.HCLID(); ok { + _spec.SetField(filedelete.FieldHCLID, field.TypeString, value) } if value, ok := fduo.mutation.Path(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedelete.FieldPath, - }) + _spec.SetField(filedelete.FieldPath, field.TypeString, value) } if value, ok := fduo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: filedelete.FieldTags, - }) + _spec.SetField(filedelete.FieldTags, field.TypeJSON, value) } if fduo.mutation.FileDeleteToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -397,10 +334,7 @@ func (fduo *FileDeleteUpdateOne) sqlSave(ctx context.Context) (_node *FileDelete Columns: []string{filedelete.FileDeleteToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -413,10 +347,7 @@ func (fduo *FileDeleteUpdateOne) sqlSave(ctx context.Context) (_node *FileDelete Columns: []string{filedelete.FileDeleteToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -431,9 +362,10 @@ func (fduo *FileDeleteUpdateOne) sqlSave(ctx context.Context) (_node *FileDelete if _, ok := err.(*sqlgraph.NotFoundError); 
ok { err = &NotFoundError{filedelete.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + fduo.mutation.done = true return _node, nil } diff --git a/ent/filedownload.go b/ent/filedownload.go index de435d23..9d123f5d 100755 --- a/ent/filedownload.go +++ b/ent/filedownload.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/filedownload" @@ -18,8 +19,8 @@ type FileDownload struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // SourceType holds the value of the "source_type" field. SourceType string `json:"source_type,omitempty" hcl:"source_type,attr"` // Source holds the value of the "source" field. @@ -44,11 +45,13 @@ type FileDownload struct { // The values are being populated by the FileDownloadQuery when eager-loading is set. Edges FileDownloadEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // FileDownloadToEnvironment holds the value of the FileDownloadToEnvironment edge. HCLFileDownloadToEnvironment *Environment `json:"FileDownloadToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_file_download *uuid.UUID + selectValues sql.SelectValues } // FileDownloadEdges holds the relations/edges for other nodes in the graph. @@ -58,6 +61,8 @@ type FileDownloadEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. + totalCount [1]map[string]int } // FileDownloadToEnvironmentOrErr returns the FileDownloadToEnvironment value or an error if the edge @@ -65,8 +70,7 @@ type FileDownloadEdges struct { func (e FileDownloadEdges) FileDownloadToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[0] { if e.FileDownloadToEnvironment == nil { - // The edge FileDownloadToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.FileDownloadToEnvironment, nil @@ -75,22 +79,22 @@ func (e FileDownloadEdges) FileDownloadToEnvironmentOrErr() (*Environment, error } // scanValues returns the types for scanning values from sql.Rows. 
-func (*FileDownload) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*FileDownload) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case filedownload.FieldTags: values[i] = new([]byte) case filedownload.FieldTemplate, filedownload.FieldDisabled, filedownload.FieldIsTxt: values[i] = new(sql.NullBool) - case filedownload.FieldHclID, filedownload.FieldSourceType, filedownload.FieldSource, filedownload.FieldDestination, filedownload.FieldPerms, filedownload.FieldMd5, filedownload.FieldAbsPath: + case filedownload.FieldHCLID, filedownload.FieldSourceType, filedownload.FieldSource, filedownload.FieldDestination, filedownload.FieldPerms, filedownload.FieldMd5, filedownload.FieldAbsPath: values[i] = new(sql.NullString) case filedownload.FieldID: values[i] = new(uuid.UUID) case filedownload.ForeignKeys[0]: // environment_environment_to_file_download values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type FileDownload", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -98,7 +102,7 @@ func (*FileDownload) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the FileDownload fields. -func (fd *FileDownload) assignValues(columns []string, values []interface{}) error { +func (fd *FileDownload) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -110,11 +114,11 @@ func (fd *FileDownload) assignValues(columns []string, values []interface{}) err } else if value != nil { fd.ID = *value } - case filedownload.FieldHclID: + case filedownload.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - fd.HclID = value.String + fd.HCLID = value.String } case filedownload.FieldSourceType: if value, ok := values[i].(*sql.NullString); !ok { @@ -185,31 +189,39 @@ func (fd *FileDownload) assignValues(columns []string, values []interface{}) err fd.environment_environment_to_file_download = new(uuid.UUID) *fd.environment_environment_to_file_download = *value.S.(*uuid.UUID) } + default: + fd.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the FileDownload. +// This includes values selected through modifiers, order, etc. +func (fd *FileDownload) Value(name string) (ent.Value, error) { + return fd.selectValues.Get(name) +} + // QueryFileDownloadToEnvironment queries the "FileDownloadToEnvironment" edge of the FileDownload entity. func (fd *FileDownload) QueryFileDownloadToEnvironment() *EnvironmentQuery { - return (&FileDownloadClient{config: fd.config}).QueryFileDownloadToEnvironment(fd) + return NewFileDownloadClient(fd.config).QueryFileDownloadToEnvironment(fd) } // Update returns a builder for updating this FileDownload. // Note that you need to call FileDownload.Unwrap() before calling this method if this FileDownload // was returned from a transaction, and the transaction was committed or rolled back. 
func (fd *FileDownload) Update() *FileDownloadUpdateOne { - return (&FileDownloadClient{config: fd.config}).UpdateOne(fd) + return NewFileDownloadClient(fd.config).UpdateOne(fd) } // Unwrap unwraps the FileDownload entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (fd *FileDownload) Unwrap() *FileDownload { - tx, ok := fd.config.driver.(*txDriver) + _tx, ok := fd.config.driver.(*txDriver) if !ok { panic("ent: FileDownload is not a transactional entity") } - fd.config.driver = tx.drv + fd.config.driver = _tx.drv return fd } @@ -217,28 +229,38 @@ func (fd *FileDownload) Unwrap() *FileDownload { func (fd *FileDownload) String() string { var builder strings.Builder builder.WriteString("FileDownload(") - builder.WriteString(fmt.Sprintf("id=%v", fd.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(fd.HclID) - builder.WriteString(", source_type=") + builder.WriteString(fmt.Sprintf("id=%v, ", fd.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(fd.HCLID) + builder.WriteString(", ") + builder.WriteString("source_type=") builder.WriteString(fd.SourceType) - builder.WriteString(", source=") + builder.WriteString(", ") + builder.WriteString("source=") builder.WriteString(fd.Source) - builder.WriteString(", destination=") + builder.WriteString(", ") + builder.WriteString("destination=") builder.WriteString(fd.Destination) - builder.WriteString(", template=") + builder.WriteString(", ") + builder.WriteString("template=") builder.WriteString(fmt.Sprintf("%v", fd.Template)) - builder.WriteString(", perms=") + builder.WriteString(", ") + builder.WriteString("perms=") builder.WriteString(fd.Perms) - builder.WriteString(", disabled=") + builder.WriteString(", ") + builder.WriteString("disabled=") builder.WriteString(fmt.Sprintf("%v", fd.Disabled)) - builder.WriteString(", md5=") + builder.WriteString(", ") + builder.WriteString("md5=") builder.WriteString(fd.Md5) - builder.WriteString(", abs_path=") + builder.WriteString(", ") + builder.WriteString("abs_path=") builder.WriteString(fd.AbsPath) - builder.WriteString(", is_txt=") + builder.WriteString(", ") + builder.WriteString("is_txt=") builder.WriteString(fmt.Sprintf("%v", fd.IsTxt)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", fd.Tags)) builder.WriteByte(')') return builder.String() @@ -246,9 +268,3 @@ func (fd *FileDownload) String() string { // FileDownloads is a parsable slice of FileDownload. type FileDownloads []*FileDownload - -func (fd FileDownloads) config(cfg config) { - for _i := range fd { - fd[_i].config = cfg - } -} diff --git a/ent/filedownload/filedownload.go b/ent/filedownload/filedownload.go index 694fb2dd..eeaf6b67 100755 --- a/ent/filedownload/filedownload.go +++ b/ent/filedownload/filedownload.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package filedownload import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "file_download" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. 
+ FieldHCLID = "hcl_id" // FieldSourceType holds the string denoting the source_type field in the database. FieldSourceType = "source_type" // FieldSource holds the string denoting the source field in the database. @@ -49,7 +51,7 @@ const ( // Columns holds all SQL columns for filedownload fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldSourceType, FieldSource, FieldDestination, @@ -89,3 +91,75 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the FileDownload queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// BySourceType orders the results by the source_type field. +func BySourceType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceType, opts...).ToFunc() +} + +// BySource orders the results by the source field. +func BySource(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSource, opts...).ToFunc() +} + +// ByDestination orders the results by the destination field. +func ByDestination(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDestination, opts...).ToFunc() +} + +// ByTemplate orders the results by the template field. +func ByTemplate(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTemplate, opts...).ToFunc() +} + +// ByPerms orders the results by the perms field. +func ByPerms(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPerms, opts...).ToFunc() +} + +// ByDisabled orders the results by the disabled field. +func ByDisabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisabled, opts...).ToFunc() +} + +// ByMd5 orders the results by the md5 field. +func ByMd5(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMd5, opts...).ToFunc() +} + +// ByAbsPath orders the results by the abs_path field. +func ByAbsPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAbsPath, opts...).ToFunc() +} + +// ByIsTxt orders the results by the is_txt field. +func ByIsTxt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsTxt, opts...).ToFunc() +} + +// ByFileDownloadToEnvironmentField orders the results by FileDownloadToEnvironment field. +func ByFileDownloadToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFileDownloadToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newFileDownloadToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FileDownloadToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, FileDownloadToEnvironmentTable, FileDownloadToEnvironmentColumn), + ) +} diff --git a/ent/filedownload/where.go b/ent/filedownload/where.go index 0d7b35c3..b285e451 100755 --- a/ent/filedownload/where.go +++ b/ent/filedownload/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package filedownload @@ -11,974 +11,582 @@ import ( // ID filters vertices based on their ID field. 
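The OrderOption helpers generated above replace the old OrderFunc values; they are passed to the query builder's Order method, matching the filedelete.OrderOption signature shown earlier in this diff. A sketch, assuming the FileDownload query builder has the analogous Order/All methods (not shown in this excerpt):

// Usage sketch, not part of the diff: order file downloads by hcl_id, then by
// destination, using the ordering helpers generated above.
func listDownloads(ctx context.Context, client *ent.Client) ([]*ent.FileDownload, error) {
	return client.FileDownload.Query().
		Order(
			filedownload.ByHCLID(),
			filedownload.ByDestination(),
		).
		All(ctx)
}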
func ID(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldEQ(FieldHCLID, v)) } // SourceType applies equality check predicate on the "source_type" field. It's identical to SourceTypeEQ. 
func SourceType(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldSourceType, v)) } // Source applies equality check predicate on the "source" field. It's identical to SourceEQ. func Source(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldSource, v)) } // Destination applies equality check predicate on the "destination" field. It's identical to DestinationEQ. func Destination(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldDestination, v)) } // Template applies equality check predicate on the "template" field. It's identical to TemplateEQ. func Template(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTemplate), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldTemplate, v)) } // Perms applies equality check predicate on the "perms" field. It's identical to PermsEQ. func Perms(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldPerms, v)) } // Disabled applies equality check predicate on the "disabled" field. It's identical to DisabledEQ. func Disabled(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldDisabled, v)) } // Md5 applies equality check predicate on the "md5" field. It's identical to Md5EQ. func Md5(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldMd5, v)) } // AbsPath applies equality check predicate on the "abs_path" field. It's identical to AbsPathEQ. func AbsPath(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldAbsPath, v)) } // IsTxt applies equality check predicate on the "is_txt" field. It's identical to IsTxtEQ. func IsTxt(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIsTxt), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldIsTxt, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. 
-func HclIDIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. 
-func HclIDHasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.FileDownload { + return predicate.FileDownload(sql.FieldContainsFold(FieldHCLID, v)) } // SourceTypeEQ applies the EQ predicate on the "source_type" field. func SourceTypeEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldSourceType, v)) } // SourceTypeNEQ applies the NEQ predicate on the "source_type" field. func SourceTypeNEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldSourceType, v)) } // SourceTypeIn applies the In predicate on the "source_type" field. func SourceTypeIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSourceType), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldSourceType, vs...)) } // SourceTypeNotIn applies the NotIn predicate on the "source_type" field. func SourceTypeNotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSourceType), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldSourceType, vs...)) } // SourceTypeGT applies the GT predicate on the "source_type" field. 
func SourceTypeGT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldGT(FieldSourceType, v)) } // SourceTypeGTE applies the GTE predicate on the "source_type" field. func SourceTypeGTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldSourceType, v)) } // SourceTypeLT applies the LT predicate on the "source_type" field. func SourceTypeLT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldLT(FieldSourceType, v)) } // SourceTypeLTE applies the LTE predicate on the "source_type" field. func SourceTypeLTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldSourceType, v)) } // SourceTypeContains applies the Contains predicate on the "source_type" field. func SourceTypeContains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldContains(FieldSourceType, v)) } // SourceTypeHasPrefix applies the HasPrefix predicate on the "source_type" field. func SourceTypeHasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldHasPrefix(FieldSourceType, v)) } // SourceTypeHasSuffix applies the HasSuffix predicate on the "source_type" field. func SourceTypeHasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldHasSuffix(FieldSourceType, v)) } // SourceTypeEqualFold applies the EqualFold predicate on the "source_type" field. func SourceTypeEqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldEqualFold(FieldSourceType, v)) } // SourceTypeContainsFold applies the ContainsFold predicate on the "source_type" field. func SourceTypeContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceType), v)) - }) + return predicate.FileDownload(sql.FieldContainsFold(FieldSourceType, v)) } // SourceEQ applies the EQ predicate on the "source" field. func SourceEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldSource, v)) } // SourceNEQ applies the NEQ predicate on the "source" field. func SourceNEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldSource, v)) } // SourceIn applies the In predicate on the "source" field. 
func SourceIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSource), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldSource, vs...)) } // SourceNotIn applies the NotIn predicate on the "source" field. func SourceNotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSource), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldSource, vs...)) } // SourceGT applies the GT predicate on the "source" field. func SourceGT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldGT(FieldSource, v)) } // SourceGTE applies the GTE predicate on the "source" field. func SourceGTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldSource, v)) } // SourceLT applies the LT predicate on the "source" field. func SourceLT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldLT(FieldSource, v)) } // SourceLTE applies the LTE predicate on the "source" field. func SourceLTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldSource, v)) } // SourceContains applies the Contains predicate on the "source" field. func SourceContains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldContains(FieldSource, v)) } // SourceHasPrefix applies the HasPrefix predicate on the "source" field. func SourceHasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldHasPrefix(FieldSource, v)) } // SourceHasSuffix applies the HasSuffix predicate on the "source" field. func SourceHasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldHasSuffix(FieldSource, v)) } // SourceEqualFold applies the EqualFold predicate on the "source" field. func SourceEqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldEqualFold(FieldSource, v)) } // SourceContainsFold applies the ContainsFold predicate on the "source" field. 
func SourceContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSource), v)) - }) + return predicate.FileDownload(sql.FieldContainsFold(FieldSource, v)) } // DestinationEQ applies the EQ predicate on the "destination" field. func DestinationEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldDestination, v)) } // DestinationNEQ applies the NEQ predicate on the "destination" field. func DestinationNEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldDestination, v)) } // DestinationIn applies the In predicate on the "destination" field. func DestinationIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDestination), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldDestination, vs...)) } // DestinationNotIn applies the NotIn predicate on the "destination" field. func DestinationNotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDestination), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldDestination, vs...)) } // DestinationGT applies the GT predicate on the "destination" field. func DestinationGT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldGT(FieldDestination, v)) } // DestinationGTE applies the GTE predicate on the "destination" field. func DestinationGTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldDestination, v)) } // DestinationLT applies the LT predicate on the "destination" field. func DestinationLT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldLT(FieldDestination, v)) } // DestinationLTE applies the LTE predicate on the "destination" field. func DestinationLTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldDestination, v)) } // DestinationContains applies the Contains predicate on the "destination" field. 
func DestinationContains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldContains(FieldDestination, v)) } // DestinationHasPrefix applies the HasPrefix predicate on the "destination" field. func DestinationHasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldHasPrefix(FieldDestination, v)) } // DestinationHasSuffix applies the HasSuffix predicate on the "destination" field. func DestinationHasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldHasSuffix(FieldDestination, v)) } // DestinationEqualFold applies the EqualFold predicate on the "destination" field. func DestinationEqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldEqualFold(FieldDestination, v)) } // DestinationContainsFold applies the ContainsFold predicate on the "destination" field. func DestinationContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDestination), v)) - }) + return predicate.FileDownload(sql.FieldContainsFold(FieldDestination, v)) } // TemplateEQ applies the EQ predicate on the "template" field. func TemplateEQ(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTemplate), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldTemplate, v)) } // TemplateNEQ applies the NEQ predicate on the "template" field. func TemplateNEQ(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTemplate), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldTemplate, v)) } // PermsEQ applies the EQ predicate on the "perms" field. func PermsEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldPerms, v)) } // PermsNEQ applies the NEQ predicate on the "perms" field. func PermsNEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldPerms, v)) } // PermsIn applies the In predicate on the "perms" field. func PermsIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPerms), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldPerms, vs...)) } // PermsNotIn applies the NotIn predicate on the "perms" field. 
func PermsNotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPerms), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldPerms, vs...)) } // PermsGT applies the GT predicate on the "perms" field. func PermsGT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldGT(FieldPerms, v)) } // PermsGTE applies the GTE predicate on the "perms" field. func PermsGTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldPerms, v)) } // PermsLT applies the LT predicate on the "perms" field. func PermsLT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldLT(FieldPerms, v)) } // PermsLTE applies the LTE predicate on the "perms" field. func PermsLTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldPerms, v)) } // PermsContains applies the Contains predicate on the "perms" field. func PermsContains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldContains(FieldPerms, v)) } // PermsHasPrefix applies the HasPrefix predicate on the "perms" field. func PermsHasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldHasPrefix(FieldPerms, v)) } // PermsHasSuffix applies the HasSuffix predicate on the "perms" field. func PermsHasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldHasSuffix(FieldPerms, v)) } // PermsEqualFold applies the EqualFold predicate on the "perms" field. func PermsEqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldEqualFold(FieldPerms, v)) } // PermsContainsFold applies the ContainsFold predicate on the "perms" field. func PermsContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPerms), v)) - }) + return predicate.FileDownload(sql.FieldContainsFold(FieldPerms, v)) } // DisabledEQ applies the EQ predicate on the "disabled" field. func DisabledEQ(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldDisabled, v)) } // DisabledNEQ applies the NEQ predicate on the "disabled" field. 
func DisabledNEQ(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDisabled), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldDisabled, v)) } // Md5EQ applies the EQ predicate on the "md5" field. func Md5EQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldMd5, v)) } // Md5NEQ applies the NEQ predicate on the "md5" field. func Md5NEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldMd5, v)) } // Md5In applies the In predicate on the "md5" field. func Md5In(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldMd5), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldMd5, vs...)) } // Md5NotIn applies the NotIn predicate on the "md5" field. func Md5NotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldMd5), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldMd5, vs...)) } // Md5GT applies the GT predicate on the "md5" field. func Md5GT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldGT(FieldMd5, v)) } // Md5GTE applies the GTE predicate on the "md5" field. func Md5GTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldMd5, v)) } // Md5LT applies the LT predicate on the "md5" field. func Md5LT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldLT(FieldMd5, v)) } // Md5LTE applies the LTE predicate on the "md5" field. func Md5LTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldMd5, v)) } // Md5Contains applies the Contains predicate on the "md5" field. func Md5Contains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldContains(FieldMd5, v)) } // Md5HasPrefix applies the HasPrefix predicate on the "md5" field. func Md5HasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldHasPrefix(FieldMd5, v)) } // Md5HasSuffix applies the HasSuffix predicate on the "md5" field. 
func Md5HasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldHasSuffix(FieldMd5, v)) } // Md5EqualFold applies the EqualFold predicate on the "md5" field. func Md5EqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldEqualFold(FieldMd5, v)) } // Md5ContainsFold applies the ContainsFold predicate on the "md5" field. func Md5ContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldMd5), v)) - }) + return predicate.FileDownload(sql.FieldContainsFold(FieldMd5, v)) } // AbsPathEQ applies the EQ predicate on the "abs_path" field. func AbsPathEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldAbsPath, v)) } // AbsPathNEQ applies the NEQ predicate on the "abs_path" field. func AbsPathNEQ(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldAbsPath, v)) } // AbsPathIn applies the In predicate on the "abs_path" field. func AbsPathIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldAbsPath), v...)) - }) + return predicate.FileDownload(sql.FieldIn(FieldAbsPath, vs...)) } // AbsPathNotIn applies the NotIn predicate on the "abs_path" field. func AbsPathNotIn(vs ...string) predicate.FileDownload { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileDownload(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldAbsPath), v...)) - }) + return predicate.FileDownload(sql.FieldNotIn(FieldAbsPath, vs...)) } // AbsPathGT applies the GT predicate on the "abs_path" field. func AbsPathGT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldGT(FieldAbsPath, v)) } // AbsPathGTE applies the GTE predicate on the "abs_path" field. func AbsPathGTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldGTE(FieldAbsPath, v)) } // AbsPathLT applies the LT predicate on the "abs_path" field. func AbsPathLT(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldLT(FieldAbsPath, v)) } // AbsPathLTE applies the LTE predicate on the "abs_path" field. 
func AbsPathLTE(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldLTE(FieldAbsPath, v)) } // AbsPathContains applies the Contains predicate on the "abs_path" field. func AbsPathContains(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldContains(FieldAbsPath, v)) } // AbsPathHasPrefix applies the HasPrefix predicate on the "abs_path" field. func AbsPathHasPrefix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldHasPrefix(FieldAbsPath, v)) } // AbsPathHasSuffix applies the HasSuffix predicate on the "abs_path" field. func AbsPathHasSuffix(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldHasSuffix(FieldAbsPath, v)) } // AbsPathEqualFold applies the EqualFold predicate on the "abs_path" field. func AbsPathEqualFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldEqualFold(FieldAbsPath, v)) } // AbsPathContainsFold applies the ContainsFold predicate on the "abs_path" field. func AbsPathContainsFold(v string) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAbsPath), v)) - }) + return predicate.FileDownload(sql.FieldContainsFold(FieldAbsPath, v)) } // IsTxtEQ applies the EQ predicate on the "is_txt" field. func IsTxtEQ(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIsTxt), v)) - }) + return predicate.FileDownload(sql.FieldEQ(FieldIsTxt, v)) } // IsTxtNEQ applies the NEQ predicate on the "is_txt" field. func IsTxtNEQ(v bool) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIsTxt), v)) - }) + return predicate.FileDownload(sql.FieldNEQ(FieldIsTxt, v)) } // HasFileDownloadToEnvironment applies the HasEdge predicate on the "FileDownloadToEnvironment" edge. @@ -986,7 +594,6 @@ func HasFileDownloadToEnvironment() predicate.FileDownload { return predicate.FileDownload(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FileDownloadToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, FileDownloadToEnvironmentTable, FileDownloadToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -996,11 +603,7 @@ func HasFileDownloadToEnvironment() predicate.FileDownload { // HasFileDownloadToEnvironmentWith applies the HasEdge predicate on the "FileDownloadToEnvironment" edge with a given conditions (other predicates). 
func HasFileDownloadToEnvironmentWith(preds ...predicate.Environment) predicate.FileDownload { return predicate.FileDownload(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FileDownloadToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, FileDownloadToEnvironmentTable, FileDownloadToEnvironmentColumn), - ) + step := newFileDownloadToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1011,32 +614,15 @@ func HasFileDownloadToEnvironmentWith(preds ...predicate.Environment) predicate. // And groups predicates with the AND operator between them. func And(predicates ...predicate.FileDownload) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.FileDownload(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.FileDownload) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.FileDownload(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.FileDownload) predicate.FileDownload { - return predicate.FileDownload(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.FileDownload(sql.NotPredicates(p)) } diff --git a/ent/filedownload_create.go b/ent/filedownload_create.go index 0f8ba760..061342ee 100755 --- a/ent/filedownload_create.go +++ b/ent/filedownload_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -21,9 +21,9 @@ type FileDownloadCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (fdc *FileDownloadCreate) SetHclID(s string) *FileDownloadCreate { - fdc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (fdc *FileDownloadCreate) SetHCLID(s string) *FileDownloadCreate { + fdc.mutation.SetHCLID(s) return fdc } @@ -135,44 +135,8 @@ func (fdc *FileDownloadCreate) Mutation() *FileDownloadMutation { // Save creates the FileDownload in the database. func (fdc *FileDownloadCreate) Save(ctx context.Context) (*FileDownload, error) { - var ( - err error - node *FileDownload - ) fdc.defaults() - if len(fdc.hooks) == 0 { - if err = fdc.check(); err != nil { - return nil, err - } - node, err = fdc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDownloadMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = fdc.check(); err != nil { - return nil, err - } - fdc.mutation = mutation - if node, err = fdc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(fdc.hooks) - 1; i >= 0; i-- { - if fdc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fdc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fdc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fdc.sqlSave, fdc.mutation, fdc.hooks) } // SaveX calls Save and panics if Save returns an error. 
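Illustrative usage sketch of the regenerated FileDownload API (not generated code): the package name, function name, and "bootstrap" prefix below are hypothetical, while the client, the renamed HCLID* predicates, and the OrderOption helpers all come from the generated packages shown in this diff.

package sketch

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/filedownload"
)

// listEnabledDownloads exercises the renamed HCLID* predicates (formerly HclID*)
// and the new OrderOption-based ordering helpers generated above.
func listEnabledDownloads(ctx context.Context, client *ent.Client) ([]*ent.FileDownload, error) {
	return client.FileDownload.Query().
		Where(
			filedownload.HCLIDHasPrefix("bootstrap"), // hypothetical hcl_id prefix
			filedownload.DisabledEQ(false),
		).
		Order(filedownload.ByHCLID()). // Order now accepts filedownload.OrderOption values
		All(ctx)
}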
@@ -211,7 +175,7 @@ func (fdc *FileDownloadCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (fdc *FileDownloadCreate) check() error { - if _, ok := fdc.mutation.HclID(); !ok { + if _, ok := fdc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "FileDownload.hcl_id"`)} } if _, ok := fdc.mutation.SourceType(); !ok { @@ -248,10 +212,13 @@ func (fdc *FileDownloadCreate) check() error { } func (fdc *FileDownloadCreate) sqlSave(ctx context.Context) (*FileDownload, error) { + if err := fdc.check(); err != nil { + return nil, err + } _node, _spec := fdc.createSpec() if err := sqlgraph.CreateNode(ctx, fdc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -262,110 +229,62 @@ func (fdc *FileDownloadCreate) sqlSave(ctx context.Context) (*FileDownload, erro return nil, err } } + fdc.mutation.id = &_node.ID + fdc.mutation.done = true return _node, nil } func (fdc *FileDownloadCreate) createSpec() (*FileDownload, *sqlgraph.CreateSpec) { var ( _node = &FileDownload{config: fdc.config} - _spec = &sqlgraph.CreateSpec{ - Table: filedownload.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(filedownload.Table, sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID)) ) if id, ok := fdc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := fdc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldHclID, - }) - _node.HclID = value + if value, ok := fdc.mutation.HCLID(); ok { + _spec.SetField(filedownload.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := fdc.mutation.SourceType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldSourceType, - }) + _spec.SetField(filedownload.FieldSourceType, field.TypeString, value) _node.SourceType = value } if value, ok := fdc.mutation.Source(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldSource, - }) + _spec.SetField(filedownload.FieldSource, field.TypeString, value) _node.Source = value } if value, ok := fdc.mutation.Destination(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldDestination, - }) + _spec.SetField(filedownload.FieldDestination, field.TypeString, value) _node.Destination = value } if value, ok := fdc.mutation.Template(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldTemplate, - }) + _spec.SetField(filedownload.FieldTemplate, field.TypeBool, value) _node.Template = value } if value, ok := fdc.mutation.Perms(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldPerms, - }) + _spec.SetField(filedownload.FieldPerms, field.TypeString, value) _node.Perms = value } if value, ok := fdc.mutation.Disabled(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldDisabled, - }) + 
_spec.SetField(filedownload.FieldDisabled, field.TypeBool, value) _node.Disabled = value } if value, ok := fdc.mutation.Md5(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldMd5, - }) + _spec.SetField(filedownload.FieldMd5, field.TypeString, value) _node.Md5 = value } if value, ok := fdc.mutation.AbsPath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldAbsPath, - }) + _spec.SetField(filedownload.FieldAbsPath, field.TypeString, value) _node.AbsPath = value } if value, ok := fdc.mutation.IsTxt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldIsTxt, - }) + _spec.SetField(filedownload.FieldIsTxt, field.TypeBool, value) _node.IsTxt = value } if value, ok := fdc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: filedownload.FieldTags, - }) + _spec.SetField(filedownload.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := fdc.mutation.FileDownloadToEnvironmentIDs(); len(nodes) > 0 { @@ -376,10 +295,7 @@ func (fdc *FileDownloadCreate) createSpec() (*FileDownload, *sqlgraph.CreateSpec Columns: []string{filedownload.FileDownloadToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -394,11 +310,15 @@ func (fdc *FileDownloadCreate) createSpec() (*FileDownload, *sqlgraph.CreateSpec // FileDownloadCreateBulk is the builder for creating many FileDownload entities in bulk. type FileDownloadCreateBulk struct { config + err error builders []*FileDownloadCreate } // Save creates the FileDownload entities in the database. func (fdcb *FileDownloadCreateBulk) Save(ctx context.Context) ([]*FileDownload, error) { + if fdcb.err != nil { + return nil, fdcb.err + } specs := make([]*sqlgraph.CreateSpec, len(fdcb.builders)) nodes := make([]*FileDownload, len(fdcb.builders)) mutators := make([]Mutator, len(fdcb.builders)) @@ -415,8 +335,8 @@ func (fdcb *FileDownloadCreateBulk) Save(ctx context.Context) ([]*FileDownload, return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, fdcb.builders[i+1].mutation) } else { @@ -424,7 +344,7 @@ func (fdcb *FileDownloadCreateBulk) Save(ctx context.Context) ([]*FileDownload, // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, fdcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/filedownload_delete.go b/ent/filedownload_delete.go index ff0c1361..e93d7c2b 100755 --- a/ent/filedownload_delete.go +++ b/ent/filedownload_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (fdd *FileDownloadDelete) Where(ps ...predicate.FileDownload) *FileDownload // Exec executes the deletion query and returns how many vertices were deleted. 
func (fdd *FileDownloadDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fdd.hooks) == 0 { - affected, err = fdd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDownloadMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fdd.mutation = mutation - affected, err = fdd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(fdd.hooks) - 1; i >= 0; i-- { - if fdd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fdd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fdd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fdd.sqlExec, fdd.mutation, fdd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (fdd *FileDownloadDelete) ExecX(ctx context.Context) int { } func (fdd *FileDownloadDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedownload.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(filedownload.Table, sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID)) if ps := fdd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (fdd *FileDownloadDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, fdd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, fdd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + fdd.mutation.done = true + return affected, err } // FileDownloadDeleteOne is the builder for deleting a single FileDownload entity. @@ -92,6 +61,12 @@ type FileDownloadDeleteOne struct { fdd *FileDownloadDelete } +// Where appends a list predicates to the FileDownloadDelete builder. +func (fddo *FileDownloadDeleteOne) Where(ps ...predicate.FileDownload) *FileDownloadDeleteOne { + fddo.fdd.mutation.Where(ps...) + return fddo +} + // Exec executes the deletion query. func (fddo *FileDownloadDeleteOne) Exec(ctx context.Context) error { n, err := fddo.fdd.Exec(ctx) @@ -107,5 +82,7 @@ func (fddo *FileDownloadDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (fddo *FileDownloadDeleteOne) ExecX(ctx context.Context) { - fddo.fdd.ExecX(ctx) + if err := fddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/filedownload_query.go b/ent/filedownload_query.go index 9189130a..92321d47 100755 --- a/ent/filedownload_query.go +++ b/ent/filedownload_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // FileDownloadQuery is the builder for querying FileDownload entities. type FileDownloadQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.FileDownload - // eager-loading edges. 
+ ctx *QueryContext + order []filedownload.OrderOption + inters []Interceptor + predicates []predicate.FileDownload withFileDownloadToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*FileDownload) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (fdq *FileDownloadQuery) Where(ps ...predicate.FileDownload) *FileDownloadQ return fdq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (fdq *FileDownloadQuery) Limit(limit int) *FileDownloadQuery { - fdq.limit = &limit + fdq.ctx.Limit = &limit return fdq } -// Offset adds an offset step to the query. +// Offset to start from. func (fdq *FileDownloadQuery) Offset(offset int) *FileDownloadQuery { - fdq.offset = &offset + fdq.ctx.Offset = &offset return fdq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (fdq *FileDownloadQuery) Unique(unique bool) *FileDownloadQuery { - fdq.unique = &unique + fdq.ctx.Unique = &unique return fdq } -// Order adds an order step to the query. -func (fdq *FileDownloadQuery) Order(o ...OrderFunc) *FileDownloadQuery { +// Order specifies how the records should be ordered. +func (fdq *FileDownloadQuery) Order(o ...filedownload.OrderOption) *FileDownloadQuery { fdq.order = append(fdq.order, o...) return fdq } // QueryFileDownloadToEnvironment chains the current query on the "FileDownloadToEnvironment" edge. func (fdq *FileDownloadQuery) QueryFileDownloadToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: fdq.config} + query := (&EnvironmentClient{config: fdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := fdq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (fdq *FileDownloadQuery) QueryFileDownloadToEnvironment() *EnvironmentQuery // First returns the first FileDownload entity from the query. // Returns a *NotFoundError when no FileDownload was found. func (fdq *FileDownloadQuery) First(ctx context.Context) (*FileDownload, error) { - nodes, err := fdq.Limit(1).All(ctx) + nodes, err := fdq.Limit(1).All(setContextOp(ctx, fdq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (fdq *FileDownloadQuery) FirstX(ctx context.Context) *FileDownload { // Returns a *NotFoundError when no FileDownload ID was found. func (fdq *FileDownloadQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = fdq.Limit(1).IDs(ctx); err != nil { + if ids, err = fdq.Limit(1).IDs(setContextOp(ctx, fdq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (fdq *FileDownloadQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one FileDownload entity is found. // Returns a *NotFoundError when no FileDownload entities are found. func (fdq *FileDownloadQuery) Only(ctx context.Context) (*FileDownload, error) { - nodes, err := fdq.Limit(2).All(ctx) + nodes, err := fdq.Limit(2).All(setContextOp(ctx, fdq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (fdq *FileDownloadQuery) OnlyX(ctx context.Context) *FileDownload { // Returns a *NotFoundError when no entities are found. 
func (fdq *FileDownloadQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = fdq.Limit(2).IDs(ctx); err != nil { + if ids, err = fdq.Limit(2).IDs(setContextOp(ctx, fdq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (fdq *FileDownloadQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of FileDownloads. func (fdq *FileDownloadQuery) All(ctx context.Context) ([]*FileDownload, error) { + ctx = setContextOp(ctx, fdq.ctx, "All") if err := fdq.prepareQuery(ctx); err != nil { return nil, err } - return fdq.sqlAll(ctx) + qr := querierAll[[]*FileDownload, *FileDownloadQuery]() + return withInterceptors[[]*FileDownload](ctx, fdq, qr, fdq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (fdq *FileDownloadQuery) AllX(ctx context.Context) []*FileDownload { } // IDs executes the query and returns a list of FileDownload IDs. -func (fdq *FileDownloadQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := fdq.Select(filedownload.FieldID).Scan(ctx, &ids); err != nil { +func (fdq *FileDownloadQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if fdq.ctx.Unique == nil && fdq.path != nil { + fdq.Unique(true) + } + ctx = setContextOp(ctx, fdq.ctx, "IDs") + if err = fdq.Select(filedownload.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (fdq *FileDownloadQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (fdq *FileDownloadQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, fdq.ctx, "Count") if err := fdq.prepareQuery(ctx); err != nil { return 0, err } - return fdq.sqlCount(ctx) + return withInterceptors[int](ctx, fdq, querierCount[*FileDownloadQuery](), fdq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (fdq *FileDownloadQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (fdq *FileDownloadQuery) Exist(ctx context.Context) (bool, error) { - if err := fdq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, fdq.ctx, "Exist") + switch _, err := fdq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return fdq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -264,22 +273,21 @@ func (fdq *FileDownloadQuery) Clone() *FileDownloadQuery { } return &FileDownloadQuery{ config: fdq.config, - limit: fdq.limit, - offset: fdq.offset, - order: append([]OrderFunc{}, fdq.order...), + ctx: fdq.ctx.Clone(), + order: append([]filedownload.OrderOption{}, fdq.order...), + inters: append([]Interceptor{}, fdq.inters...), predicates: append([]predicate.FileDownload{}, fdq.predicates...), withFileDownloadToEnvironment: fdq.withFileDownloadToEnvironment.Clone(), // clone intermediate query. - sql: fdq.sql.Clone(), - path: fdq.path, - unique: fdq.unique, + sql: fdq.sql.Clone(), + path: fdq.path, } } // WithFileDownloadToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "FileDownloadToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. 
func (fdq *FileDownloadQuery) WithFileDownloadToEnvironment(opts ...func(*EnvironmentQuery)) *FileDownloadQuery { - query := &EnvironmentQuery{config: fdq.config} + query := (&EnvironmentClient{config: fdq.config}).Query() for _, opt := range opts { opt(query) } @@ -293,25 +301,21 @@ func (fdq *FileDownloadQuery) WithFileDownloadToEnvironment(opts ...func(*Enviro // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.FileDownload.Query(). -// GroupBy(filedownload.FieldHclID). +// GroupBy(filedownload.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (fdq *FileDownloadQuery) GroupBy(field string, fields ...string) *FileDownloadGroupBy { - group := &FileDownloadGroupBy{config: fdq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := fdq.prepareQuery(ctx); err != nil { - return nil, err - } - return fdq.sqlQuery(ctx), nil - } - return group + fdq.ctx.Fields = append([]string{field}, fields...) + grbuild := &FileDownloadGroupBy{build: fdq} + grbuild.flds = &fdq.ctx.Fields + grbuild.label = filedownload.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -320,20 +324,37 @@ func (fdq *FileDownloadQuery) GroupBy(field string, fields ...string) *FileDownl // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.FileDownload.Query(). -// Select(filedownload.FieldHclID). +// Select(filedownload.FieldHCLID). // Scan(ctx, &v) -// func (fdq *FileDownloadQuery) Select(fields ...string) *FileDownloadSelect { - fdq.fields = append(fdq.fields, fields...) - return &FileDownloadSelect{FileDownloadQuery: fdq} + fdq.ctx.Fields = append(fdq.ctx.Fields, fields...) + sbuild := &FileDownloadSelect{FileDownloadQuery: fdq} + sbuild.label = filedownload.Label + sbuild.flds, sbuild.scan = &fdq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a FileDownloadSelect configured with the given aggregations. +func (fdq *FileDownloadQuery) Aggregate(fns ...AggregateFunc) *FileDownloadSelect { + return fdq.Select().Aggregate(fns...) } func (fdq *FileDownloadQuery) prepareQuery(ctx context.Context) error { - for _, f := range fdq.fields { + for _, inter := range fdq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, fdq); err != nil { + return err + } + } + } + for _, f := range fdq.ctx.Fields { if !filedownload.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (fdq *FileDownloadQuery) prepareQuery(ctx context.Context) error { return nil } -func (fdq *FileDownloadQuery) sqlAll(ctx context.Context) ([]*FileDownload, error) { +func (fdq *FileDownloadQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*FileDownload, error) { var ( nodes = []*FileDownload{} withFKs = fdq.withFKs @@ -363,92 +384,95 @@ func (fdq *FileDownloadQuery) sqlAll(ctx context.Context) ([]*FileDownload, erro if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, filedownload.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*FileDownload).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &FileDownload{config: fdq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(fdq.modifiers) > 0 { + _spec.Modifiers = fdq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, fdq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := fdq.withFileDownloadToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*FileDownload) - for i := range nodes { - if nodes[i].environment_environment_to_file_download == nil { - continue - } - fk := *nodes[i].environment_environment_to_file_download - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := fdq.loadFileDownloadToEnvironment(ctx, query, nodes, nil, + func(n *FileDownload, e *Environment) { n.Edges.FileDownloadToEnvironment = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_download" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.FileDownloadToEnvironment = n - } + } + for i := range fdq.loadTotal { + if err := fdq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (fdq *FileDownloadQuery) sqlCount(ctx context.Context) (int, error) { - _spec := fdq.querySpec() - _spec.Node.Columns = fdq.fields - if len(fdq.fields) > 0 { - _spec.Unique = fdq.unique != nil && *fdq.unique +func (fdq *FileDownloadQuery) loadFileDownloadToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*FileDownload, init func(*FileDownload), assign func(*FileDownload, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*FileDownload) + for i := range nodes { + if nodes[i].environment_environment_to_file_download == nil { + continue + } + fk := *nodes[i].environment_environment_to_file_download + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, fdq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_download" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (fdq *FileDownloadQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := fdq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (fdq *FileDownloadQuery) sqlCount(ctx context.Context) (int, error) { + _spec := fdq.querySpec() + if len(fdq.modifiers) > 0 { + 
_spec.Modifiers = fdq.modifiers } - return n > 0, nil + _spec.Node.Columns = fdq.ctx.Fields + if len(fdq.ctx.Fields) > 0 { + _spec.Unique = fdq.ctx.Unique != nil && *fdq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, fdq.driver, _spec) } func (fdq *FileDownloadQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedownload.Table, - Columns: filedownload.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, - }, - From: fdq.sql, - Unique: true, - } - if unique := fdq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(filedownload.Table, filedownload.Columns, sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID)) + _spec.From = fdq.sql + if unique := fdq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if fdq.path != nil { + _spec.Unique = true } - if fields := fdq.fields; len(fields) > 0 { + if fields := fdq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, filedownload.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (fdq *FileDownloadQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := fdq.limit; limit != nil { + if limit := fdq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := fdq.offset; offset != nil { + if offset := fdq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := fdq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (fdq *FileDownloadQuery) querySpec() *sqlgraph.QuerySpec { func (fdq *FileDownloadQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(fdq.driver.Dialect()) t1 := builder.Table(filedownload.Table) - columns := fdq.fields + columns := fdq.ctx.Fields if len(columns) == 0 { columns = filedownload.Columns } @@ -492,7 +516,7 @@ func (fdq *FileDownloadQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = fdq.sql selector.Select(selector.Columns(columns...)...) } - if fdq.unique != nil && *fdq.unique { + if fdq.ctx.Unique != nil && *fdq.ctx.Unique { selector.Distinct() } for _, p := range fdq.predicates { @@ -501,12 +525,12 @@ func (fdq *FileDownloadQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range fdq.order { p(selector) } - if offset := fdq.offset; offset != nil { + if offset := fdq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := fdq.limit; limit != nil { + if limit := fdq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (fdq *FileDownloadQuery) sqlQuery(ctx context.Context) *sql.Selector { // FileDownloadGroupBy is the group-by builder for FileDownload entities. type FileDownloadGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *FileDownloadQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (fdgb *FileDownloadGroupBy) Aggregate(fns ...AggregateFunc) *FileDownloadGr return fdgb } -// Scan applies the group-by query and scans the result into the given value. -func (fdgb *FileDownloadGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := fdgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. 
+func (fdgb *FileDownloadGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fdgb.build.ctx, "GroupBy") + if err := fdgb.build.prepareQuery(ctx); err != nil { return err } - fdgb.sql = query - return fdgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := fdgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDownloadGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) StringsX(ctx context.Context) []string { - v, err := fdgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fdgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) StringX(ctx context.Context) string { - v, err := fdgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDownloadGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) IntsX(ctx context.Context) []int { - v, err := fdgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fdgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*FileDownloadQuery, *FileDownloadGroupBy](ctx, fdgb.build, fdgb, fdgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) IntX(ctx context.Context) int { - v, err := fdgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (fdgb *FileDownloadGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDownloadGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := fdgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fdgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) Float64X(ctx context.Context) float64 { - v, err := fdgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(fdgb.fields) > 1 { - return nil, errors.New("ent: FileDownloadGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := fdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) BoolsX(ctx context.Context) []bool { - v, err := fdgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fdgb *FileDownloadGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fdgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fdgb *FileDownloadGroupBy) BoolX(ctx context.Context) bool { - v, err := fdgb.Bool(ctx) - if err != nil { - panic(err) +func (fdgb *FileDownloadGroupBy) sqlScan(ctx context.Context, root *FileDownloadQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(fdgb.fns)) + for _, fn := range fdgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (fdgb *FileDownloadGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range fdgb.fields { - if !filedownload.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*fdgb.flds)+len(fdgb.fns)) + for _, f := range *fdgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := fdgb.sqlQuery() + selector.GroupBy(selector.Columns(*fdgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := fdgb.driver.Query(ctx, query, args, rows); err != nil { + if err := fdgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (fdgb *FileDownloadGroupBy) sqlQuery() *sql.Selector { - selector := fdgb.sql.Select() - aggregation := make([]string, 0, len(fdgb.fns)) - for _, fn := range fdgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(fdgb.fields)+len(fdgb.fns)) - for _, f := range fdgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(fdgb.fields...)...) -} - // FileDownloadSelect is the builder for selecting fields of FileDownload entities. type FileDownloadSelect struct { *FileDownloadQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (fds *FileDownloadSelect) Aggregate(fns ...AggregateFunc) *FileDownloadSelect { + fds.fns = append(fds.fns, fns...) + return fds } // Scan applies the selector query and scans the result into the given value. -func (fds *FileDownloadSelect) Scan(ctx context.Context, v interface{}) error { +func (fds *FileDownloadSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fds.ctx, "Select") if err := fds.prepareQuery(ctx); err != nil { return err } - fds.sql = fds.FileDownloadQuery.sqlQuery(ctx) - return fds.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fds *FileDownloadSelect) ScanX(ctx context.Context, v interface{}) { - if err := fds.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) Strings(ctx context.Context) ([]string, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDownloadSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fds *FileDownloadSelect) StringsX(ctx context.Context) []string { - v, err := fds.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*FileDownloadQuery, *FileDownloadSelect](ctx, fds.FileDownloadQuery, fds, fds.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fds.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. 
-func (fds *FileDownloadSelect) StringX(ctx context.Context) string { - v, err := fds.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) Ints(ctx context.Context) ([]int, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDownloadSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fds *FileDownloadSelect) IntsX(ctx context.Context) []int { - v, err := fds.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fds.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (fds *FileDownloadSelect) IntX(ctx context.Context) int { - v, err := fds.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDownloadSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fds *FileDownloadSelect) Float64sX(ctx context.Context) []float64 { - v, err := fds.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fds.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fds *FileDownloadSelect) Float64X(ctx context.Context) float64 { - v, err := fds.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (fds *FileDownloadSelect) Bools(ctx context.Context) ([]bool, error) { - if len(fds.fields) > 1 { - return nil, errors.New("ent: FileDownloadSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := fds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fds *FileDownloadSelect) BoolsX(ctx context.Context) []bool { - v, err := fds.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (fds *FileDownloadSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fds.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{filedownload.Label} - default: - err = fmt.Errorf("ent: FileDownloadSelect.Bools returned %d results when one was expected", len(v)) +func (fds *FileDownloadSelect) sqlScan(ctx context.Context, root *FileDownloadQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(fds.fns)) + for _, fn := range fds.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fds *FileDownloadSelect) BoolX(ctx context.Context) bool { - v, err := fds.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*fds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (fds *FileDownloadSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := fds.sql.Query() + query, args := selector.Query() if err := fds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/filedownload_update.go b/ent/filedownload_update.go index 61b427c4..0b920ef1 100755 --- a/ent/filedownload_update.go +++ b/ent/filedownload_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -29,9 +29,17 @@ func (fdu *FileDownloadUpdate) Where(ps ...predicate.FileDownload) *FileDownload return fdu } -// SetHclID sets the "hcl_id" field. -func (fdu *FileDownloadUpdate) SetHclID(s string) *FileDownloadUpdate { - fdu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (fdu *FileDownloadUpdate) SetHCLID(s string) *FileDownloadUpdate { + fdu.mutation.SetHCLID(s) + return fdu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableHCLID(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetHCLID(*s) + } return fdu } @@ -41,48 +49,112 @@ func (fdu *FileDownloadUpdate) SetSourceType(s string) *FileDownloadUpdate { return fdu } +// SetNillableSourceType sets the "source_type" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableSourceType(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetSourceType(*s) + } + return fdu +} + // SetSource sets the "source" field. func (fdu *FileDownloadUpdate) SetSource(s string) *FileDownloadUpdate { fdu.mutation.SetSource(s) return fdu } +// SetNillableSource sets the "source" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableSource(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetSource(*s) + } + return fdu +} + // SetDestination sets the "destination" field. func (fdu *FileDownloadUpdate) SetDestination(s string) *FileDownloadUpdate { fdu.mutation.SetDestination(s) return fdu } +// SetNillableDestination sets the "destination" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableDestination(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetDestination(*s) + } + return fdu +} + // SetTemplate sets the "template" field. func (fdu *FileDownloadUpdate) SetTemplate(b bool) *FileDownloadUpdate { fdu.mutation.SetTemplate(b) return fdu } +// SetNillableTemplate sets the "template" field if the given value is not nil. 
+func (fdu *FileDownloadUpdate) SetNillableTemplate(b *bool) *FileDownloadUpdate { + if b != nil { + fdu.SetTemplate(*b) + } + return fdu +} + // SetPerms sets the "perms" field. func (fdu *FileDownloadUpdate) SetPerms(s string) *FileDownloadUpdate { fdu.mutation.SetPerms(s) return fdu } +// SetNillablePerms sets the "perms" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillablePerms(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetPerms(*s) + } + return fdu +} + // SetDisabled sets the "disabled" field. func (fdu *FileDownloadUpdate) SetDisabled(b bool) *FileDownloadUpdate { fdu.mutation.SetDisabled(b) return fdu } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableDisabled(b *bool) *FileDownloadUpdate { + if b != nil { + fdu.SetDisabled(*b) + } + return fdu +} + // SetMd5 sets the "md5" field. func (fdu *FileDownloadUpdate) SetMd5(s string) *FileDownloadUpdate { fdu.mutation.SetMd5(s) return fdu } +// SetNillableMd5 sets the "md5" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableMd5(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetMd5(*s) + } + return fdu +} + // SetAbsPath sets the "abs_path" field. func (fdu *FileDownloadUpdate) SetAbsPath(s string) *FileDownloadUpdate { fdu.mutation.SetAbsPath(s) return fdu } +// SetNillableAbsPath sets the "abs_path" field if the given value is not nil. +func (fdu *FileDownloadUpdate) SetNillableAbsPath(s *string) *FileDownloadUpdate { + if s != nil { + fdu.SetAbsPath(*s) + } + return fdu +} + // SetIsTxt sets the "is_txt" field. func (fdu *FileDownloadUpdate) SetIsTxt(b bool) *FileDownloadUpdate { fdu.mutation.SetIsTxt(b) @@ -135,34 +207,7 @@ func (fdu *FileDownloadUpdate) ClearFileDownloadToEnvironment() *FileDownloadUpd // Save executes the query and returns the number of nodes affected by the update operation. func (fdu *FileDownloadUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fdu.hooks) == 0 { - affected, err = fdu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDownloadMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fdu.mutation = mutation - affected, err = fdu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(fdu.hooks) - 1; i >= 0; i-- { - if fdu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fdu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fdu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fdu.sqlSave, fdu.mutation, fdu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -188,16 +233,7 @@ func (fdu *FileDownloadUpdate) ExecX(ctx context.Context) { } func (fdu *FileDownloadUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedownload.Table, - Columns: filedownload.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(filedownload.Table, filedownload.Columns, sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID)) if ps := fdu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -205,82 +241,38 @@ func (fdu *FileDownloadUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := fdu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldHclID, - }) + if value, ok := fdu.mutation.HCLID(); ok { + _spec.SetField(filedownload.FieldHCLID, field.TypeString, value) } if value, ok := fdu.mutation.SourceType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldSourceType, - }) + _spec.SetField(filedownload.FieldSourceType, field.TypeString, value) } if value, ok := fdu.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldSource, - }) + _spec.SetField(filedownload.FieldSource, field.TypeString, value) } if value, ok := fdu.mutation.Destination(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldDestination, - }) + _spec.SetField(filedownload.FieldDestination, field.TypeString, value) } if value, ok := fdu.mutation.Template(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldTemplate, - }) + _spec.SetField(filedownload.FieldTemplate, field.TypeBool, value) } if value, ok := fdu.mutation.Perms(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldPerms, - }) + _spec.SetField(filedownload.FieldPerms, field.TypeString, value) } if value, ok := fdu.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldDisabled, - }) + _spec.SetField(filedownload.FieldDisabled, field.TypeBool, value) } if value, ok := fdu.mutation.Md5(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldMd5, - }) + _spec.SetField(filedownload.FieldMd5, field.TypeString, value) } if value, ok := fdu.mutation.AbsPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldAbsPath, - }) + _spec.SetField(filedownload.FieldAbsPath, field.TypeString, value) } if value, ok := fdu.mutation.IsTxt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldIsTxt, - }) + _spec.SetField(filedownload.FieldIsTxt, field.TypeBool, value) } if value, ok := fdu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ 
- Type: field.TypeJSON, - Value: value, - Column: filedownload.FieldTags, - }) + _spec.SetField(filedownload.FieldTags, field.TypeJSON, value) } if fdu.mutation.FileDownloadToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -290,10 +282,7 @@ func (fdu *FileDownloadUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{filedownload.FileDownloadToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -306,10 +295,7 @@ func (fdu *FileDownloadUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{filedownload.FileDownloadToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -321,10 +307,11 @@ func (fdu *FileDownloadUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{filedownload.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + fdu.mutation.done = true return n, nil } @@ -336,9 +323,17 @@ type FileDownloadUpdateOne struct { mutation *FileDownloadMutation } -// SetHclID sets the "hcl_id" field. -func (fduo *FileDownloadUpdateOne) SetHclID(s string) *FileDownloadUpdateOne { - fduo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (fduo *FileDownloadUpdateOne) SetHCLID(s string) *FileDownloadUpdateOne { + fduo.mutation.SetHCLID(s) + return fduo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableHCLID(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetHCLID(*s) + } return fduo } @@ -348,48 +343,112 @@ func (fduo *FileDownloadUpdateOne) SetSourceType(s string) *FileDownloadUpdateOn return fduo } +// SetNillableSourceType sets the "source_type" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableSourceType(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetSourceType(*s) + } + return fduo +} + // SetSource sets the "source" field. func (fduo *FileDownloadUpdateOne) SetSource(s string) *FileDownloadUpdateOne { fduo.mutation.SetSource(s) return fduo } +// SetNillableSource sets the "source" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableSource(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetSource(*s) + } + return fduo +} + // SetDestination sets the "destination" field. func (fduo *FileDownloadUpdateOne) SetDestination(s string) *FileDownloadUpdateOne { fduo.mutation.SetDestination(s) return fduo } +// SetNillableDestination sets the "destination" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableDestination(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetDestination(*s) + } + return fduo +} + // SetTemplate sets the "template" field. func (fduo *FileDownloadUpdateOne) SetTemplate(b bool) *FileDownloadUpdateOne { fduo.mutation.SetTemplate(b) return fduo } +// SetNillableTemplate sets the "template" field if the given value is not nil. 
+func (fduo *FileDownloadUpdateOne) SetNillableTemplate(b *bool) *FileDownloadUpdateOne { + if b != nil { + fduo.SetTemplate(*b) + } + return fduo +} + // SetPerms sets the "perms" field. func (fduo *FileDownloadUpdateOne) SetPerms(s string) *FileDownloadUpdateOne { fduo.mutation.SetPerms(s) return fduo } +// SetNillablePerms sets the "perms" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillablePerms(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetPerms(*s) + } + return fduo +} + // SetDisabled sets the "disabled" field. func (fduo *FileDownloadUpdateOne) SetDisabled(b bool) *FileDownloadUpdateOne { fduo.mutation.SetDisabled(b) return fduo } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableDisabled(b *bool) *FileDownloadUpdateOne { + if b != nil { + fduo.SetDisabled(*b) + } + return fduo +} + // SetMd5 sets the "md5" field. func (fduo *FileDownloadUpdateOne) SetMd5(s string) *FileDownloadUpdateOne { fduo.mutation.SetMd5(s) return fduo } +// SetNillableMd5 sets the "md5" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableMd5(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetMd5(*s) + } + return fduo +} + // SetAbsPath sets the "abs_path" field. func (fduo *FileDownloadUpdateOne) SetAbsPath(s string) *FileDownloadUpdateOne { fduo.mutation.SetAbsPath(s) return fduo } +// SetNillableAbsPath sets the "abs_path" field if the given value is not nil. +func (fduo *FileDownloadUpdateOne) SetNillableAbsPath(s *string) *FileDownloadUpdateOne { + if s != nil { + fduo.SetAbsPath(*s) + } + return fduo +} + // SetIsTxt sets the "is_txt" field. func (fduo *FileDownloadUpdateOne) SetIsTxt(b bool) *FileDownloadUpdateOne { fduo.mutation.SetIsTxt(b) @@ -440,6 +499,12 @@ func (fduo *FileDownloadUpdateOne) ClearFileDownloadToEnvironment() *FileDownloa return fduo } +// Where appends a list predicates to the FileDownloadUpdate builder. +func (fduo *FileDownloadUpdateOne) Where(ps ...predicate.FileDownload) *FileDownloadUpdateOne { + fduo.mutation.Where(ps...) + return fduo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (fduo *FileDownloadUpdateOne) Select(field string, fields ...string) *FileDownloadUpdateOne { @@ -449,34 +514,7 @@ func (fduo *FileDownloadUpdateOne) Select(field string, fields ...string) *FileD // Save executes the query and returns the updated FileDownload entity. func (fduo *FileDownloadUpdateOne) Save(ctx context.Context) (*FileDownload, error) { - var ( - err error - node *FileDownload - ) - if len(fduo.hooks) == 0 { - node, err = fduo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileDownloadMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fduo.mutation = mutation - node, err = fduo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(fduo.hooks) - 1; i >= 0; i-- { - if fduo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fduo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fduo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fduo.sqlSave, fduo.mutation, fduo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -502,16 +540,7 @@ func (fduo *FileDownloadUpdateOne) ExecX(ctx context.Context) { } func (fduo *FileDownloadUpdateOne) sqlSave(ctx context.Context) (_node *FileDownload, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: filedownload.Table, - Columns: filedownload.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(filedownload.Table, filedownload.Columns, sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID)) id, ok := fduo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "FileDownload.id" for update`)} @@ -536,82 +565,38 @@ func (fduo *FileDownloadUpdateOne) sqlSave(ctx context.Context) (_node *FileDown } } } - if value, ok := fduo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldHclID, - }) + if value, ok := fduo.mutation.HCLID(); ok { + _spec.SetField(filedownload.FieldHCLID, field.TypeString, value) } if value, ok := fduo.mutation.SourceType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldSourceType, - }) + _spec.SetField(filedownload.FieldSourceType, field.TypeString, value) } if value, ok := fduo.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldSource, - }) + _spec.SetField(filedownload.FieldSource, field.TypeString, value) } if value, ok := fduo.mutation.Destination(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldDestination, - }) + _spec.SetField(filedownload.FieldDestination, field.TypeString, value) } if value, ok := fduo.mutation.Template(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldTemplate, - }) + _spec.SetField(filedownload.FieldTemplate, field.TypeBool, value) } if value, ok := fduo.mutation.Perms(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldPerms, - }) + _spec.SetField(filedownload.FieldPerms, field.TypeString, value) } if value, ok := fduo.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldDisabled, - }) + _spec.SetField(filedownload.FieldDisabled, field.TypeBool, value) } if value, ok := fduo.mutation.Md5(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldMd5, - }) + _spec.SetField(filedownload.FieldMd5, field.TypeString, value) } if value, ok := fduo.mutation.AbsPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: filedownload.FieldAbsPath, - }) + _spec.SetField(filedownload.FieldAbsPath, field.TypeString, value) } if value, ok := fduo.mutation.IsTxt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: filedownload.FieldIsTxt, - }) + _spec.SetField(filedownload.FieldIsTxt, field.TypeBool, value) } if value, ok := fduo.mutation.Tags(); ok { - 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: filedownload.FieldTags, - }) + _spec.SetField(filedownload.FieldTags, field.TypeJSON, value) } if fduo.mutation.FileDownloadToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -621,10 +606,7 @@ func (fduo *FileDownloadUpdateOne) sqlSave(ctx context.Context) (_node *FileDown Columns: []string{filedownload.FileDownloadToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -637,10 +619,7 @@ func (fduo *FileDownloadUpdateOne) sqlSave(ctx context.Context) (_node *FileDown Columns: []string{filedownload.FileDownloadToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -655,9 +634,10 @@ func (fduo *FileDownloadUpdateOne) sqlSave(ctx context.Context) (_node *FileDown if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{filedownload.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + fduo.mutation.done = true return _node, nil } diff --git a/ent/fileextract.go b/ent/fileextract.go index 15dddf54..bc17dc78 100755 --- a/ent/fileextract.go +++ b/ent/fileextract.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/fileextract" @@ -18,8 +19,8 @@ type FileExtract struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Source holds the value of the "source" field. Source string `json:"source,omitempty" hcl:"source,attr"` // Destination holds the value of the "destination" field. @@ -32,11 +33,13 @@ type FileExtract struct { // The values are being populated by the FileExtractQuery when eager-loading is set. Edges FileExtractEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // FileExtractToEnvironment holds the value of the FileExtractToEnvironment edge. HCLFileExtractToEnvironment *Environment `json:"FileExtractToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_file_extract *uuid.UUID + selectValues sql.SelectValues } // FileExtractEdges holds the relations/edges for other nodes in the graph. @@ -46,6 +49,8 @@ type FileExtractEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. 
+ totalCount [1]map[string]int } // FileExtractToEnvironmentOrErr returns the FileExtractToEnvironment value or an error if the edge @@ -53,8 +58,7 @@ type FileExtractEdges struct { func (e FileExtractEdges) FileExtractToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[0] { if e.FileExtractToEnvironment == nil { - // The edge FileExtractToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.FileExtractToEnvironment, nil @@ -63,20 +67,20 @@ func (e FileExtractEdges) FileExtractToEnvironmentOrErr() (*Environment, error) } // scanValues returns the types for scanning values from sql.Rows. -func (*FileExtract) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*FileExtract) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case fileextract.FieldTags: values[i] = new([]byte) - case fileextract.FieldHclID, fileextract.FieldSource, fileextract.FieldDestination, fileextract.FieldType: + case fileextract.FieldHCLID, fileextract.FieldSource, fileextract.FieldDestination, fileextract.FieldType: values[i] = new(sql.NullString) case fileextract.FieldID: values[i] = new(uuid.UUID) case fileextract.ForeignKeys[0]: // environment_environment_to_file_extract values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type FileExtract", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -84,7 +88,7 @@ func (*FileExtract) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the FileExtract fields. -func (fe *FileExtract) assignValues(columns []string, values []interface{}) error { +func (fe *FileExtract) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -96,11 +100,11 @@ func (fe *FileExtract) assignValues(columns []string, values []interface{}) erro } else if value != nil { fe.ID = *value } - case fileextract.FieldHclID: + case fileextract.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - fe.HclID = value.String + fe.HCLID = value.String } case fileextract.FieldSource: if value, ok := values[i].(*sql.NullString); !ok { @@ -135,31 +139,39 @@ func (fe *FileExtract) assignValues(columns []string, values []interface{}) erro fe.environment_environment_to_file_extract = new(uuid.UUID) *fe.environment_environment_to_file_extract = *value.S.(*uuid.UUID) } + default: + fe.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the FileExtract. +// This includes values selected through modifiers, order, etc. +func (fe *FileExtract) Value(name string) (ent.Value, error) { + return fe.selectValues.Get(name) +} + // QueryFileExtractToEnvironment queries the "FileExtractToEnvironment" edge of the FileExtract entity. 
func (fe *FileExtract) QueryFileExtractToEnvironment() *EnvironmentQuery { - return (&FileExtractClient{config: fe.config}).QueryFileExtractToEnvironment(fe) + return NewFileExtractClient(fe.config).QueryFileExtractToEnvironment(fe) } // Update returns a builder for updating this FileExtract. // Note that you need to call FileExtract.Unwrap() before calling this method if this FileExtract // was returned from a transaction, and the transaction was committed or rolled back. func (fe *FileExtract) Update() *FileExtractUpdateOne { - return (&FileExtractClient{config: fe.config}).UpdateOne(fe) + return NewFileExtractClient(fe.config).UpdateOne(fe) } // Unwrap unwraps the FileExtract entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (fe *FileExtract) Unwrap() *FileExtract { - tx, ok := fe.config.driver.(*txDriver) + _tx, ok := fe.config.driver.(*txDriver) if !ok { panic("ent: FileExtract is not a transactional entity") } - fe.config.driver = tx.drv + fe.config.driver = _tx.drv return fe } @@ -167,16 +179,20 @@ func (fe *FileExtract) Unwrap() *FileExtract { func (fe *FileExtract) String() string { var builder strings.Builder builder.WriteString("FileExtract(") - builder.WriteString(fmt.Sprintf("id=%v", fe.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(fe.HclID) - builder.WriteString(", source=") + builder.WriteString(fmt.Sprintf("id=%v, ", fe.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(fe.HCLID) + builder.WriteString(", ") + builder.WriteString("source=") builder.WriteString(fe.Source) - builder.WriteString(", destination=") + builder.WriteString(", ") + builder.WriteString("destination=") builder.WriteString(fe.Destination) - builder.WriteString(", type=") + builder.WriteString(", ") + builder.WriteString("type=") builder.WriteString(fe.Type) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", fe.Tags)) builder.WriteByte(')') return builder.String() @@ -184,9 +200,3 @@ func (fe *FileExtract) String() string { // FileExtracts is a parsable slice of FileExtract. type FileExtracts []*FileExtract - -func (fe FileExtracts) config(cfg config) { - for _i := range fe { - fe[_i].config = cfg - } -} diff --git a/ent/fileextract/fileextract.go b/ent/fileextract/fileextract.go index ee4361d4..31fd6016 100755 --- a/ent/fileextract/fileextract.go +++ b/ent/fileextract/fileextract.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package fileextract import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "file_extract" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldSource holds the string denoting the source field in the database. FieldSource = "source" // FieldDestination holds the string denoting the destination field in the database. @@ -37,7 +39,7 @@ const ( // Columns holds all SQL columns for fileextract fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldSource, FieldDestination, FieldType, @@ -69,3 +71,45 @@ var ( // DefaultID holds the default value on creation for the "id" field. 
DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the FileExtract queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// BySource orders the results by the source field. +func BySource(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSource, opts...).ToFunc() +} + +// ByDestination orders the results by the destination field. +func ByDestination(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDestination, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByFileExtractToEnvironmentField orders the results by FileExtractToEnvironment field. +func ByFileExtractToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFileExtractToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newFileExtractToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FileExtractToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, FileExtractToEnvironmentTable, FileExtractToEnvironmentColumn), + ) +} diff --git a/ent/fileextract/where.go b/ent/fileextract/where.go index 6689b199..99fb9db3 100755 --- a/ent/fileextract/where.go +++ b/ent/fileextract/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package fileextract @@ -11,557 +11,327 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.FileExtract(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.FileExtract(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.FileExtract(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldEQ(FieldHCLID, v)) } // Source applies equality check predicate on the "source" field. It's identical to SourceEQ. func Source(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldSource, v)) } // Destination applies equality check predicate on the "destination" field. It's identical to DestinationEQ. func Destination(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldDestination, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldType, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. 
-func HclIDIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. 
+func HCLIDHasPrefix(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.FileExtract { + return predicate.FileExtract(sql.FieldContainsFold(FieldHCLID, v)) } // SourceEQ applies the EQ predicate on the "source" field. func SourceEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldSource, v)) } // SourceNEQ applies the NEQ predicate on the "source" field. func SourceNEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldNEQ(FieldSource, v)) } // SourceIn applies the In predicate on the "source" field. func SourceIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSource), v...)) - }) + return predicate.FileExtract(sql.FieldIn(FieldSource, vs...)) } // SourceNotIn applies the NotIn predicate on the "source" field. func SourceNotIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSource), v...)) - }) + return predicate.FileExtract(sql.FieldNotIn(FieldSource, vs...)) } // SourceGT applies the GT predicate on the "source" field. func SourceGT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldGT(FieldSource, v)) } // SourceGTE applies the GTE predicate on the "source" field. 
func SourceGTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldGTE(FieldSource, v)) } // SourceLT applies the LT predicate on the "source" field. func SourceLT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldLT(FieldSource, v)) } // SourceLTE applies the LTE predicate on the "source" field. func SourceLTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldLTE(FieldSource, v)) } // SourceContains applies the Contains predicate on the "source" field. func SourceContains(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldContains(FieldSource, v)) } // SourceHasPrefix applies the HasPrefix predicate on the "source" field. func SourceHasPrefix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldHasPrefix(FieldSource, v)) } // SourceHasSuffix applies the HasSuffix predicate on the "source" field. func SourceHasSuffix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldHasSuffix(FieldSource, v)) } // SourceEqualFold applies the EqualFold predicate on the "source" field. func SourceEqualFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldEqualFold(FieldSource, v)) } // SourceContainsFold applies the ContainsFold predicate on the "source" field. func SourceContainsFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSource), v)) - }) + return predicate.FileExtract(sql.FieldContainsFold(FieldSource, v)) } // DestinationEQ applies the EQ predicate on the "destination" field. func DestinationEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldDestination, v)) } // DestinationNEQ applies the NEQ predicate on the "destination" field. func DestinationNEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldNEQ(FieldDestination, v)) } // DestinationIn applies the In predicate on the "destination" field. func DestinationIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDestination), v...)) - }) + return predicate.FileExtract(sql.FieldIn(FieldDestination, vs...)) } // DestinationNotIn applies the NotIn predicate on the "destination" field. 
func DestinationNotIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDestination), v...)) - }) + return predicate.FileExtract(sql.FieldNotIn(FieldDestination, vs...)) } // DestinationGT applies the GT predicate on the "destination" field. func DestinationGT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldGT(FieldDestination, v)) } // DestinationGTE applies the GTE predicate on the "destination" field. func DestinationGTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldGTE(FieldDestination, v)) } // DestinationLT applies the LT predicate on the "destination" field. func DestinationLT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldLT(FieldDestination, v)) } // DestinationLTE applies the LTE predicate on the "destination" field. func DestinationLTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldLTE(FieldDestination, v)) } // DestinationContains applies the Contains predicate on the "destination" field. func DestinationContains(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldContains(FieldDestination, v)) } // DestinationHasPrefix applies the HasPrefix predicate on the "destination" field. func DestinationHasPrefix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldHasPrefix(FieldDestination, v)) } // DestinationHasSuffix applies the HasSuffix predicate on the "destination" field. func DestinationHasSuffix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldHasSuffix(FieldDestination, v)) } // DestinationEqualFold applies the EqualFold predicate on the "destination" field. func DestinationEqualFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldEqualFold(FieldDestination, v)) } // DestinationContainsFold applies the ContainsFold predicate on the "destination" field. func DestinationContainsFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDestination), v)) - }) + return predicate.FileExtract(sql.FieldContainsFold(FieldDestination, v)) } // TypeEQ applies the EQ predicate on the "type" field. 
func TypeEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.FileExtract(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...string) predicate.FileExtract { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.FileExtract(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.FileExtract(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. func TypeLT(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. func TypeHasSuffix(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldHasSuffix(FieldType, v)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. 
func TypeEqualFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.FileExtract(sql.FieldContainsFold(FieldType, v)) } // HasFileExtractToEnvironment applies the HasEdge predicate on the "FileExtractToEnvironment" edge. @@ -569,7 +339,6 @@ func HasFileExtractToEnvironment() predicate.FileExtract { return predicate.FileExtract(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FileExtractToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, FileExtractToEnvironmentTable, FileExtractToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -579,11 +348,7 @@ func HasFileExtractToEnvironment() predicate.FileExtract { // HasFileExtractToEnvironmentWith applies the HasEdge predicate on the "FileExtractToEnvironment" edge with a given conditions (other predicates). func HasFileExtractToEnvironmentWith(preds ...predicate.Environment) predicate.FileExtract { return predicate.FileExtract(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FileExtractToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, FileExtractToEnvironmentTable, FileExtractToEnvironmentColumn), - ) + step := newFileExtractToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -594,32 +359,15 @@ func HasFileExtractToEnvironmentWith(preds ...predicate.Environment) predicate.F // And groups predicates with the AND operator between them. func And(predicates ...predicate.FileExtract) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.FileExtract(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.FileExtract) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.FileExtract(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.FileExtract) predicate.FileExtract { - return predicate.FileExtract(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.FileExtract(sql.NotPredicates(p)) } diff --git a/ent/fileextract_create.go b/ent/fileextract_create.go index abe5b086..4c13f875 100755 --- a/ent/fileextract_create.go +++ b/ent/fileextract_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -21,9 +21,9 @@ type FileExtractCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (fec *FileExtractCreate) SetHclID(s string) *FileExtractCreate { - fec.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. 
+func (fec *FileExtractCreate) SetHCLID(s string) *FileExtractCreate { + fec.mutation.SetHCLID(s) return fec } @@ -91,44 +91,8 @@ func (fec *FileExtractCreate) Mutation() *FileExtractMutation { // Save creates the FileExtract in the database. func (fec *FileExtractCreate) Save(ctx context.Context) (*FileExtract, error) { - var ( - err error - node *FileExtract - ) fec.defaults() - if len(fec.hooks) == 0 { - if err = fec.check(); err != nil { - return nil, err - } - node, err = fec.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileExtractMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = fec.check(); err != nil { - return nil, err - } - fec.mutation = mutation - if node, err = fec.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(fec.hooks) - 1; i >= 0; i-- { - if fec.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fec.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fec.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fec.sqlSave, fec.mutation, fec.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -163,7 +127,7 @@ func (fec *FileExtractCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (fec *FileExtractCreate) check() error { - if _, ok := fec.mutation.HclID(); !ok { + if _, ok := fec.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "FileExtract.hcl_id"`)} } if _, ok := fec.mutation.Source(); !ok { @@ -182,10 +146,13 @@ func (fec *FileExtractCreate) check() error { } func (fec *FileExtractCreate) sqlSave(ctx context.Context) (*FileExtract, error) { + if err := fec.check(); err != nil { + return nil, err + } _node, _spec := fec.createSpec() if err := sqlgraph.CreateNode(ctx, fec.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -196,62 +163,38 @@ func (fec *FileExtractCreate) sqlSave(ctx context.Context) (*FileExtract, error) return nil, err } } + fec.mutation.id = &_node.ID + fec.mutation.done = true return _node, nil } func (fec *FileExtractCreate) createSpec() (*FileExtract, *sqlgraph.CreateSpec) { var ( _node = &FileExtract{config: fec.config} - _spec = &sqlgraph.CreateSpec{ - Table: fileextract.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(fileextract.Table, sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID)) ) if id, ok := fec.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := fec.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldHclID, - }) - _node.HclID = value + if value, ok := fec.mutation.HCLID(); ok { + _spec.SetField(fileextract.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := fec.mutation.Source(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldSource, - }) + _spec.SetField(fileextract.FieldSource, field.TypeString, value) _node.Source = value } if value, ok := 
fec.mutation.Destination(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldDestination, - }) + _spec.SetField(fileextract.FieldDestination, field.TypeString, value) _node.Destination = value } if value, ok := fec.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldType, - }) + _spec.SetField(fileextract.FieldType, field.TypeString, value) _node.Type = value } if value, ok := fec.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: fileextract.FieldTags, - }) + _spec.SetField(fileextract.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := fec.mutation.FileExtractToEnvironmentIDs(); len(nodes) > 0 { @@ -262,10 +205,7 @@ func (fec *FileExtractCreate) createSpec() (*FileExtract, *sqlgraph.CreateSpec) Columns: []string{fileextract.FileExtractToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -280,11 +220,15 @@ func (fec *FileExtractCreate) createSpec() (*FileExtract, *sqlgraph.CreateSpec) // FileExtractCreateBulk is the builder for creating many FileExtract entities in bulk. type FileExtractCreateBulk struct { config + err error builders []*FileExtractCreate } // Save creates the FileExtract entities in the database. func (fecb *FileExtractCreateBulk) Save(ctx context.Context) ([]*FileExtract, error) { + if fecb.err != nil { + return nil, fecb.err + } specs := make([]*sqlgraph.CreateSpec, len(fecb.builders)) nodes := make([]*FileExtract, len(fecb.builders)) mutators := make([]Mutator, len(fecb.builders)) @@ -301,8 +245,8 @@ func (fecb *FileExtractCreateBulk) Save(ctx context.Context) ([]*FileExtract, er return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, fecb.builders[i+1].mutation) } else { @@ -310,7 +254,7 @@ func (fecb *FileExtractCreateBulk) Save(ctx context.Context) ([]*FileExtract, er // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, fecb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/fileextract_delete.go b/ent/fileextract_delete.go index 3ed0dc34..d28a8e3f 100755 --- a/ent/fileextract_delete.go +++ b/ent/fileextract_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (fed *FileExtractDelete) Where(ps ...predicate.FileExtract) *FileExtractDel // Exec executes the deletion query and returns how many vertices were deleted. 
func (fed *FileExtractDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fed.hooks) == 0 { - affected, err = fed.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileExtractMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fed.mutation = mutation - affected, err = fed.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(fed.hooks) - 1; i >= 0; i-- { - if fed.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fed.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fed.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fed.sqlExec, fed.mutation, fed.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (fed *FileExtractDelete) ExecX(ctx context.Context) int { } func (fed *FileExtractDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: fileextract.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(fileextract.Table, sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID)) if ps := fed.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (fed *FileExtractDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, fed.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, fed.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + fed.mutation.done = true + return affected, err } // FileExtractDeleteOne is the builder for deleting a single FileExtract entity. @@ -92,6 +61,12 @@ type FileExtractDeleteOne struct { fed *FileExtractDelete } +// Where appends a list predicates to the FileExtractDelete builder. +func (fedo *FileExtractDeleteOne) Where(ps ...predicate.FileExtract) *FileExtractDeleteOne { + fedo.fed.mutation.Where(ps...) + return fedo +} + // Exec executes the deletion query. func (fedo *FileExtractDeleteOne) Exec(ctx context.Context) error { n, err := fedo.fed.Exec(ctx) @@ -107,5 +82,7 @@ func (fedo *FileExtractDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (fedo *FileExtractDeleteOne) ExecX(ctx context.Context) { - fedo.fed.ExecX(ctx) + if err := fedo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/fileextract_query.go b/ent/fileextract_query.go index bbca8f15..c36cb01e 100755 --- a/ent/fileextract_query.go +++ b/ent/fileextract_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // FileExtractQuery is the builder for querying FileExtract entities. type FileExtractQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.FileExtract - // eager-loading edges. + ctx *QueryContext + order []fileextract.OrderOption + inters []Interceptor + predicates []predicate.FileExtract withFileExtractToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*FileExtract) error // intermediate query (i.e. 
traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (feq *FileExtractQuery) Where(ps ...predicate.FileExtract) *FileExtractQuer return feq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (feq *FileExtractQuery) Limit(limit int) *FileExtractQuery { - feq.limit = &limit + feq.ctx.Limit = &limit return feq } -// Offset adds an offset step to the query. +// Offset to start from. func (feq *FileExtractQuery) Offset(offset int) *FileExtractQuery { - feq.offset = &offset + feq.ctx.Offset = &offset return feq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (feq *FileExtractQuery) Unique(unique bool) *FileExtractQuery { - feq.unique = &unique + feq.ctx.Unique = &unique return feq } -// Order adds an order step to the query. -func (feq *FileExtractQuery) Order(o ...OrderFunc) *FileExtractQuery { +// Order specifies how the records should be ordered. +func (feq *FileExtractQuery) Order(o ...fileextract.OrderOption) *FileExtractQuery { feq.order = append(feq.order, o...) return feq } // QueryFileExtractToEnvironment chains the current query on the "FileExtractToEnvironment" edge. func (feq *FileExtractQuery) QueryFileExtractToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: feq.config} + query := (&EnvironmentClient{config: feq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := feq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (feq *FileExtractQuery) QueryFileExtractToEnvironment() *EnvironmentQuery { // First returns the first FileExtract entity from the query. // Returns a *NotFoundError when no FileExtract was found. func (feq *FileExtractQuery) First(ctx context.Context) (*FileExtract, error) { - nodes, err := feq.Limit(1).All(ctx) + nodes, err := feq.Limit(1).All(setContextOp(ctx, feq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (feq *FileExtractQuery) FirstX(ctx context.Context) *FileExtract { // Returns a *NotFoundError when no FileExtract ID was found. func (feq *FileExtractQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = feq.Limit(1).IDs(ctx); err != nil { + if ids, err = feq.Limit(1).IDs(setContextOp(ctx, feq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (feq *FileExtractQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one FileExtract entity is found. // Returns a *NotFoundError when no FileExtract entities are found. func (feq *FileExtractQuery) Only(ctx context.Context) (*FileExtract, error) { - nodes, err := feq.Limit(2).All(ctx) + nodes, err := feq.Limit(2).All(setContextOp(ctx, feq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (feq *FileExtractQuery) OnlyX(ctx context.Context) *FileExtract { // Returns a *NotFoundError when no entities are found. func (feq *FileExtractQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = feq.Limit(2).IDs(ctx); err != nil { + if ids, err = feq.Limit(2).IDs(setContextOp(ctx, feq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (feq *FileExtractQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of FileExtracts. 
func (feq *FileExtractQuery) All(ctx context.Context) ([]*FileExtract, error) { + ctx = setContextOp(ctx, feq.ctx, "All") if err := feq.prepareQuery(ctx); err != nil { return nil, err } - return feq.sqlAll(ctx) + qr := querierAll[[]*FileExtract, *FileExtractQuery]() + return withInterceptors[[]*FileExtract](ctx, feq, qr, feq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (feq *FileExtractQuery) AllX(ctx context.Context) []*FileExtract { } // IDs executes the query and returns a list of FileExtract IDs. -func (feq *FileExtractQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := feq.Select(fileextract.FieldID).Scan(ctx, &ids); err != nil { +func (feq *FileExtractQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if feq.ctx.Unique == nil && feq.path != nil { + feq.Unique(true) + } + ctx = setContextOp(ctx, feq.ctx, "IDs") + if err = feq.Select(fileextract.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (feq *FileExtractQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (feq *FileExtractQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, feq.ctx, "Count") if err := feq.prepareQuery(ctx); err != nil { return 0, err } - return feq.sqlCount(ctx) + return withInterceptors[int](ctx, feq, querierCount[*FileExtractQuery](), feq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (feq *FileExtractQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (feq *FileExtractQuery) Exist(ctx context.Context) (bool, error) { - if err := feq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, feq.ctx, "Exist") + switch _, err := feq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return feq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -264,22 +273,21 @@ func (feq *FileExtractQuery) Clone() *FileExtractQuery { } return &FileExtractQuery{ config: feq.config, - limit: feq.limit, - offset: feq.offset, - order: append([]OrderFunc{}, feq.order...), + ctx: feq.ctx.Clone(), + order: append([]fileextract.OrderOption{}, feq.order...), + inters: append([]Interceptor{}, feq.inters...), predicates: append([]predicate.FileExtract{}, feq.predicates...), withFileExtractToEnvironment: feq.withFileExtractToEnvironment.Clone(), // clone intermediate query. - sql: feq.sql.Clone(), - path: feq.path, - unique: feq.unique, + sql: feq.sql.Clone(), + path: feq.path, } } // WithFileExtractToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "FileExtractToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. 
func (feq *FileExtractQuery) WithFileExtractToEnvironment(opts ...func(*EnvironmentQuery)) *FileExtractQuery { - query := &EnvironmentQuery{config: feq.config} + query := (&EnvironmentClient{config: feq.config}).Query() for _, opt := range opts { opt(query) } @@ -293,25 +301,21 @@ func (feq *FileExtractQuery) WithFileExtractToEnvironment(opts ...func(*Environm // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.FileExtract.Query(). -// GroupBy(fileextract.FieldHclID). +// GroupBy(fileextract.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (feq *FileExtractQuery) GroupBy(field string, fields ...string) *FileExtractGroupBy { - group := &FileExtractGroupBy{config: feq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := feq.prepareQuery(ctx); err != nil { - return nil, err - } - return feq.sqlQuery(ctx), nil - } - return group + feq.ctx.Fields = append([]string{field}, fields...) + grbuild := &FileExtractGroupBy{build: feq} + grbuild.flds = &feq.ctx.Fields + grbuild.label = fileextract.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -320,20 +324,37 @@ func (feq *FileExtractQuery) GroupBy(field string, fields ...string) *FileExtrac // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.FileExtract.Query(). -// Select(fileextract.FieldHclID). +// Select(fileextract.FieldHCLID). // Scan(ctx, &v) -// func (feq *FileExtractQuery) Select(fields ...string) *FileExtractSelect { - feq.fields = append(feq.fields, fields...) - return &FileExtractSelect{FileExtractQuery: feq} + feq.ctx.Fields = append(feq.ctx.Fields, fields...) + sbuild := &FileExtractSelect{FileExtractQuery: feq} + sbuild.label = fileextract.Label + sbuild.flds, sbuild.scan = &feq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a FileExtractSelect configured with the given aggregations. +func (feq *FileExtractQuery) Aggregate(fns ...AggregateFunc) *FileExtractSelect { + return feq.Select().Aggregate(fns...) } func (feq *FileExtractQuery) prepareQuery(ctx context.Context) error { - for _, f := range feq.fields { + for _, inter := range feq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, feq); err != nil { + return err + } + } + } + for _, f := range feq.ctx.Fields { if !fileextract.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (feq *FileExtractQuery) prepareQuery(ctx context.Context) error { return nil } -func (feq *FileExtractQuery) sqlAll(ctx context.Context) ([]*FileExtract, error) { +func (feq *FileExtractQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*FileExtract, error) { var ( nodes = []*FileExtract{} withFKs = feq.withFKs @@ -363,92 +384,95 @@ func (feq *FileExtractQuery) sqlAll(ctx context.Context) ([]*FileExtract, error) if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, fileextract.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*FileExtract).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &FileExtract{config: feq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(feq.modifiers) > 0 { + _spec.Modifiers = feq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, feq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := feq.withFileExtractToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*FileExtract) - for i := range nodes { - if nodes[i].environment_environment_to_file_extract == nil { - continue - } - fk := *nodes[i].environment_environment_to_file_extract - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := feq.loadFileExtractToEnvironment(ctx, query, nodes, nil, + func(n *FileExtract, e *Environment) { n.Edges.FileExtractToEnvironment = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_extract" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.FileExtractToEnvironment = n - } + } + for i := range feq.loadTotal { + if err := feq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (feq *FileExtractQuery) sqlCount(ctx context.Context) (int, error) { - _spec := feq.querySpec() - _spec.Node.Columns = feq.fields - if len(feq.fields) > 0 { - _spec.Unique = feq.unique != nil && *feq.unique +func (feq *FileExtractQuery) loadFileExtractToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*FileExtract, init func(*FileExtract), assign func(*FileExtract, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*FileExtract) + for i := range nodes { + if nodes[i].environment_environment_to_file_extract == nil { + continue + } + fk := *nodes[i].environment_environment_to_file_extract + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, feq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_file_extract" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (feq *FileExtractQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := feq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (feq *FileExtractQuery) sqlCount(ctx context.Context) (int, error) { + _spec := feq.querySpec() + if len(feq.modifiers) > 0 { + _spec.Modifiers = feq.modifiers } 
- return n > 0, nil + _spec.Node.Columns = feq.ctx.Fields + if len(feq.ctx.Fields) > 0 { + _spec.Unique = feq.ctx.Unique != nil && *feq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, feq.driver, _spec) } func (feq *FileExtractQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: fileextract.Table, - Columns: fileextract.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, - }, - From: feq.sql, - Unique: true, - } - if unique := feq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(fileextract.Table, fileextract.Columns, sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID)) + _spec.From = feq.sql + if unique := feq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if feq.path != nil { + _spec.Unique = true } - if fields := feq.fields; len(fields) > 0 { + if fields := feq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, fileextract.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (feq *FileExtractQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := feq.limit; limit != nil { + if limit := feq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := feq.offset; offset != nil { + if offset := feq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := feq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (feq *FileExtractQuery) querySpec() *sqlgraph.QuerySpec { func (feq *FileExtractQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(feq.driver.Dialect()) t1 := builder.Table(fileextract.Table) - columns := feq.fields + columns := feq.ctx.Fields if len(columns) == 0 { columns = fileextract.Columns } @@ -492,7 +516,7 @@ func (feq *FileExtractQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = feq.sql selector.Select(selector.Columns(columns...)...) } - if feq.unique != nil && *feq.unique { + if feq.ctx.Unique != nil && *feq.ctx.Unique { selector.Distinct() } for _, p := range feq.predicates { @@ -501,12 +525,12 @@ func (feq *FileExtractQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range feq.order { p(selector) } - if offset := feq.offset; offset != nil { + if offset := feq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := feq.limit; limit != nil { + if limit := feq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (feq *FileExtractQuery) sqlQuery(ctx context.Context) *sql.Selector { // FileExtractGroupBy is the group-by builder for FileExtract entities. type FileExtractGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *FileExtractQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (fegb *FileExtractGroupBy) Aggregate(fns ...AggregateFunc) *FileExtractGrou return fegb } -// Scan applies the group-by query and scans the result into the given value. -func (fegb *FileExtractGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := fegb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. 
+func (fegb *FileExtractGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fegb.build.ctx, "GroupBy") + if err := fegb.build.prepareQuery(ctx); err != nil { return err } - fegb.sql = query - return fegb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fegb *FileExtractGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := fegb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(fegb.fields) > 1 { - return nil, errors.New("ent: FileExtractGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := fegb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fegb *FileExtractGroupBy) StringsX(ctx context.Context) []string { - v, err := fegb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fegb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fegb *FileExtractGroupBy) StringX(ctx context.Context) string { - v, err := fegb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(fegb.fields) > 1 { - return nil, errors.New("ent: FileExtractGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := fegb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fegb *FileExtractGroupBy) IntsX(ctx context.Context) []int { - v, err := fegb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fegb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*FileExtractQuery, *FileExtractGroupBy](ctx, fegb.build, fegb, fegb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (fegb *FileExtractGroupBy) IntX(ctx context.Context) int { - v, err := fegb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (fegb *FileExtractGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(fegb.fields) > 1 { - return nil, errors.New("ent: FileExtractGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := fegb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fegb *FileExtractGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := fegb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fegb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fegb *FileExtractGroupBy) Float64X(ctx context.Context) float64 { - v, err := fegb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(fegb.fields) > 1 { - return nil, errors.New("ent: FileExtractGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := fegb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fegb *FileExtractGroupBy) BoolsX(ctx context.Context) []bool { - v, err := fegb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fegb *FileExtractGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fegb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fegb *FileExtractGroupBy) BoolX(ctx context.Context) bool { - v, err := fegb.Bool(ctx) - if err != nil { - panic(err) +func (fegb *FileExtractGroupBy) sqlScan(ctx context.Context, root *FileExtractQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(fegb.fns)) + for _, fn := range fegb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (fegb *FileExtractGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range fegb.fields { - if !fileextract.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*fegb.flds)+len(fegb.fns)) + for _, f := range *fegb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := fegb.sqlQuery() + selector.GroupBy(selector.Columns(*fegb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := fegb.driver.Query(ctx, query, args, rows); err != nil { + if err := fegb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (fegb *FileExtractGroupBy) sqlQuery() *sql.Selector { - selector := fegb.sql.Select() - aggregation := make([]string, 0, len(fegb.fns)) - for _, fn := range fegb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(fegb.fields)+len(fegb.fns)) - for _, f := range fegb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(fegb.fields...)...) -} - // FileExtractSelect is the builder for selecting fields of FileExtract entities. type FileExtractSelect struct { *FileExtractQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (fes *FileExtractSelect) Aggregate(fns ...AggregateFunc) *FileExtractSelect { + fes.fns = append(fes.fns, fns...) + return fes } // Scan applies the selector query and scans the result into the given value. -func (fes *FileExtractSelect) Scan(ctx context.Context, v interface{}) error { +func (fes *FileExtractSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fes.ctx, "Select") if err := fes.prepareQuery(ctx); err != nil { return err } - fes.sql = fes.FileExtractQuery.sqlQuery(ctx) - return fes.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fes *FileExtractSelect) ScanX(ctx context.Context, v interface{}) { - if err := fes.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (fes *FileExtractSelect) Strings(ctx context.Context) ([]string, error) { - if len(fes.fields) > 1 { - return nil, errors.New("ent: FileExtractSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := fes.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fes *FileExtractSelect) StringsX(ctx context.Context) []string { - v, err := fes.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*FileExtractQuery, *FileExtractSelect](ctx, fes.FileExtractQuery, fes, fes.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (fes *FileExtractSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fes.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fes *FileExtractSelect) StringX(ctx context.Context) string { - v, err := fes.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. 
It is only allowed when selecting one field. -func (fes *FileExtractSelect) Ints(ctx context.Context) ([]int, error) { - if len(fes.fields) > 1 { - return nil, errors.New("ent: FileExtractSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := fes.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fes *FileExtractSelect) IntsX(ctx context.Context) []int { - v, err := fes.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (fes *FileExtractSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fes.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (fes *FileExtractSelect) IntX(ctx context.Context) int { - v, err := fes.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (fes *FileExtractSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(fes.fields) > 1 { - return nil, errors.New("ent: FileExtractSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := fes.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fes *FileExtractSelect) Float64sX(ctx context.Context) []float64 { - v, err := fes.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (fes *FileExtractSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fes.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fes *FileExtractSelect) Float64X(ctx context.Context) float64 { - v, err := fes.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (fes *FileExtractSelect) Bools(ctx context.Context) ([]bool, error) { - if len(fes.fields) > 1 { - return nil, errors.New("ent: FileExtractSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := fes.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fes *FileExtractSelect) BoolsX(ctx context.Context) []bool { - v, err := fes.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
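// Sketch of the corresponding Select usage (same "client"/"ctx" assumptions as the
// GroupBy sketch above). With the typed helpers gone from this file, a single
// selected column is scanned straight into a basic slice via the generic Scan:
func listFileExtractSources(ctx context.Context, client *ent.Client) ([]string, error) {
	var sources []string
	err := client.FileExtract.Query().
		Select(fileextract.FieldSource).
		Scan(ctx, &sources)
	return sources, err
}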
-func (fes *FileExtractSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fes.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{fileextract.Label} - default: - err = fmt.Errorf("ent: FileExtractSelect.Bools returned %d results when one was expected", len(v)) +func (fes *FileExtractSelect) sqlScan(ctx context.Context, root *FileExtractQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(fes.fns)) + for _, fn := range fes.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fes *FileExtractSelect) BoolX(ctx context.Context) bool { - v, err := fes.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*fes.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (fes *FileExtractSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := fes.sql.Query() + query, args := selector.Query() if err := fes.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/fileextract_update.go b/ent/fileextract_update.go index b9b039fd..1fe10580 100755 --- a/ent/fileextract_update.go +++ b/ent/fileextract_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -29,9 +29,17 @@ func (feu *FileExtractUpdate) Where(ps ...predicate.FileExtract) *FileExtractUpd return feu } -// SetHclID sets the "hcl_id" field. -func (feu *FileExtractUpdate) SetHclID(s string) *FileExtractUpdate { - feu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (feu *FileExtractUpdate) SetHCLID(s string) *FileExtractUpdate { + feu.mutation.SetHCLID(s) + return feu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (feu *FileExtractUpdate) SetNillableHCLID(s *string) *FileExtractUpdate { + if s != nil { + feu.SetHCLID(*s) + } return feu } @@ -41,18 +49,42 @@ func (feu *FileExtractUpdate) SetSource(s string) *FileExtractUpdate { return feu } +// SetNillableSource sets the "source" field if the given value is not nil. +func (feu *FileExtractUpdate) SetNillableSource(s *string) *FileExtractUpdate { + if s != nil { + feu.SetSource(*s) + } + return feu +} + // SetDestination sets the "destination" field. func (feu *FileExtractUpdate) SetDestination(s string) *FileExtractUpdate { feu.mutation.SetDestination(s) return feu } +// SetNillableDestination sets the "destination" field if the given value is not nil. +func (feu *FileExtractUpdate) SetNillableDestination(s *string) *FileExtractUpdate { + if s != nil { + feu.SetDestination(*s) + } + return feu +} + // SetType sets the "type" field. func (feu *FileExtractUpdate) SetType(s string) *FileExtractUpdate { feu.mutation.SetType(s) return feu } +// SetNillableType sets the "type" field if the given value is not nil. +func (feu *FileExtractUpdate) SetNillableType(s *string) *FileExtractUpdate { + if s != nil { + feu.SetType(*s) + } + return feu +} + // SetTags sets the "tags" field. 
func (feu *FileExtractUpdate) SetTags(m map[string]string) *FileExtractUpdate { feu.mutation.SetTags(m) @@ -91,34 +123,7 @@ func (feu *FileExtractUpdate) ClearFileExtractToEnvironment() *FileExtractUpdate // Save executes the query and returns the number of nodes affected by the update operation. func (feu *FileExtractUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(feu.hooks) == 0 { - affected, err = feu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileExtractMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - feu.mutation = mutation - affected, err = feu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(feu.hooks) - 1; i >= 0; i-- { - if feu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = feu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, feu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, feu.sqlSave, feu.mutation, feu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -144,16 +149,7 @@ func (feu *FileExtractUpdate) ExecX(ctx context.Context) { } func (feu *FileExtractUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: fileextract.Table, - Columns: fileextract.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(fileextract.Table, fileextract.Columns, sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID)) if ps := feu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -161,40 +157,20 @@ func (feu *FileExtractUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := feu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldHclID, - }) + if value, ok := feu.mutation.HCLID(); ok { + _spec.SetField(fileextract.FieldHCLID, field.TypeString, value) } if value, ok := feu.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldSource, - }) + _spec.SetField(fileextract.FieldSource, field.TypeString, value) } if value, ok := feu.mutation.Destination(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldDestination, - }) + _spec.SetField(fileextract.FieldDestination, field.TypeString, value) } if value, ok := feu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldType, - }) + _spec.SetField(fileextract.FieldType, field.TypeString, value) } if value, ok := feu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: fileextract.FieldTags, - }) + _spec.SetField(fileextract.FieldTags, field.TypeJSON, value) } if feu.mutation.FileExtractToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -204,10 +180,7 @@ func (feu *FileExtractUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{fileextract.FileExtractToEnvironmentColumn}, Bidi: 
false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -220,10 +193,7 @@ func (feu *FileExtractUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{fileextract.FileExtractToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -235,10 +205,11 @@ func (feu *FileExtractUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{fileextract.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + feu.mutation.done = true return n, nil } @@ -250,9 +221,17 @@ type FileExtractUpdateOne struct { mutation *FileExtractMutation } -// SetHclID sets the "hcl_id" field. -func (feuo *FileExtractUpdateOne) SetHclID(s string) *FileExtractUpdateOne { - feuo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (feuo *FileExtractUpdateOne) SetHCLID(s string) *FileExtractUpdateOne { + feuo.mutation.SetHCLID(s) + return feuo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (feuo *FileExtractUpdateOne) SetNillableHCLID(s *string) *FileExtractUpdateOne { + if s != nil { + feuo.SetHCLID(*s) + } return feuo } @@ -262,18 +241,42 @@ func (feuo *FileExtractUpdateOne) SetSource(s string) *FileExtractUpdateOne { return feuo } +// SetNillableSource sets the "source" field if the given value is not nil. +func (feuo *FileExtractUpdateOne) SetNillableSource(s *string) *FileExtractUpdateOne { + if s != nil { + feuo.SetSource(*s) + } + return feuo +} + // SetDestination sets the "destination" field. func (feuo *FileExtractUpdateOne) SetDestination(s string) *FileExtractUpdateOne { feuo.mutation.SetDestination(s) return feuo } +// SetNillableDestination sets the "destination" field if the given value is not nil. +func (feuo *FileExtractUpdateOne) SetNillableDestination(s *string) *FileExtractUpdateOne { + if s != nil { + feuo.SetDestination(*s) + } + return feuo +} + // SetType sets the "type" field. func (feuo *FileExtractUpdateOne) SetType(s string) *FileExtractUpdateOne { feuo.mutation.SetType(s) return feuo } +// SetNillableType sets the "type" field if the given value is not nil. +func (feuo *FileExtractUpdateOne) SetNillableType(s *string) *FileExtractUpdateOne { + if s != nil { + feuo.SetType(*s) + } + return feuo +} + // SetTags sets the "tags" field. func (feuo *FileExtractUpdateOne) SetTags(m map[string]string) *FileExtractUpdateOne { feuo.mutation.SetTags(m) @@ -310,6 +313,12 @@ func (feuo *FileExtractUpdateOne) ClearFileExtractToEnvironment() *FileExtractUp return feuo } +// Where appends a list predicates to the FileExtractUpdate builder. +func (feuo *FileExtractUpdateOne) Where(ps ...predicate.FileExtract) *FileExtractUpdateOne { + feuo.mutation.Where(ps...) + return feuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
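// Sketch of the new nillable setters and the Where method added to the update-one
// builder above. UpdateOneID and Save are the standard generated client surface and
// are assumed here (they are not shown in this hunk), as is the github.com/google/uuid
// import; "client"/"ctx" are as in the earlier sketches.
func moveFileExtract(ctx context.Context, client *ent.Client, id uuid.UUID, dst *string) (*ent.FileExtract, error) {
	// SetNillableDestination is a no-op when dst is nil, so optional HCL or GraphQL
	// inputs no longer clobber existing values. Extra guard predicates can also be
	// chained with .Where(...) on the update-one builder now.
	return client.FileExtract.UpdateOneID(id).
		SetNillableDestination(dst).
		Save(ctx)
}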
func (feuo *FileExtractUpdateOne) Select(field string, fields ...string) *FileExtractUpdateOne { @@ -319,34 +328,7 @@ func (feuo *FileExtractUpdateOne) Select(field string, fields ...string) *FileEx // Save executes the query and returns the updated FileExtract entity. func (feuo *FileExtractUpdateOne) Save(ctx context.Context) (*FileExtract, error) { - var ( - err error - node *FileExtract - ) - if len(feuo.hooks) == 0 { - node, err = feuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FileExtractMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - feuo.mutation = mutation - node, err = feuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(feuo.hooks) - 1; i >= 0; i-- { - if feuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = feuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, feuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, feuo.sqlSave, feuo.mutation, feuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -372,16 +354,7 @@ func (feuo *FileExtractUpdateOne) ExecX(ctx context.Context) { } func (feuo *FileExtractUpdateOne) sqlSave(ctx context.Context) (_node *FileExtract, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: fileextract.Table, - Columns: fileextract.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(fileextract.Table, fileextract.Columns, sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID)) id, ok := feuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "FileExtract.id" for update`)} @@ -406,40 +379,20 @@ func (feuo *FileExtractUpdateOne) sqlSave(ctx context.Context) (_node *FileExtra } } } - if value, ok := feuo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldHclID, - }) + if value, ok := feuo.mutation.HCLID(); ok { + _spec.SetField(fileextract.FieldHCLID, field.TypeString, value) } if value, ok := feuo.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldSource, - }) + _spec.SetField(fileextract.FieldSource, field.TypeString, value) } if value, ok := feuo.mutation.Destination(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldDestination, - }) + _spec.SetField(fileextract.FieldDestination, field.TypeString, value) } if value, ok := feuo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: fileextract.FieldType, - }) + _spec.SetField(fileextract.FieldType, field.TypeString, value) } if value, ok := feuo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: fileextract.FieldTags, - }) + _spec.SetField(fileextract.FieldTags, field.TypeJSON, value) } if feuo.mutation.FileExtractToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -449,10 +402,7 @@ func (feuo *FileExtractUpdateOne) sqlSave(ctx context.Context) (_node *FileExtra Columns: 
[]string{fileextract.FileExtractToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -465,10 +415,7 @@ func (feuo *FileExtractUpdateOne) sqlSave(ctx context.Context) (_node *FileExtra Columns: []string{fileextract.FileExtractToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -483,9 +430,10 @@ func (feuo *FileExtractUpdateOne) sqlSave(ctx context.Context) (_node *FileExtra if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{fileextract.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + feuo.mutation.done = true return _node, nil } diff --git a/ent/finding.go b/ent/finding.go index e4c0a23f..b443d0a5 100755 --- a/ent/finding.go +++ b/ent/finding.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/finding" @@ -34,6 +35,7 @@ type Finding struct { // The values are being populated by the FindingQuery when eager-loading is set. Edges FindingEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // FindingToUser holds the value of the FindingToUser edge. HCLFindingToUser []*User `json:"FindingToUser,omitempty" hcl:"maintainer,block"` @@ -43,10 +45,11 @@ type Finding struct { HCLFindingToScript *Script `json:"FindingToScript,omitempty"` // FindingToEnvironment holds the value of the FindingToEnvironment edge. HCLFindingToEnvironment *Environment `json:"FindingToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_finding *uuid.UUID finding_finding_to_host *uuid.UUID script_script_to_finding *uuid.UUID + selectValues sql.SelectValues } // FindingEdges holds the relations/edges for other nodes in the graph. @@ -62,6 +65,10 @@ type FindingEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [4]bool + // totalCount holds the count of the edges above. + totalCount [4]map[string]int + + namedFindingToUser map[string][]*User } // FindingToUserOrErr returns the FindingToUser value or an error if the edge @@ -78,8 +85,7 @@ func (e FindingEdges) FindingToUserOrErr() ([]*User, error) { func (e FindingEdges) FindingToHostOrErr() (*Host, error) { if e.loadedTypes[1] { if e.FindingToHost == nil { - // The edge FindingToHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: host.Label} } return e.FindingToHost, nil @@ -92,8 +98,7 @@ func (e FindingEdges) FindingToHostOrErr() (*Host, error) { func (e FindingEdges) FindingToScriptOrErr() (*Script, error) { if e.loadedTypes[2] { if e.FindingToScript == nil { - // The edge FindingToScript was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: script.Label} } return e.FindingToScript, nil @@ -106,8 +111,7 @@ func (e FindingEdges) FindingToScriptOrErr() (*Script, error) { func (e FindingEdges) FindingToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[3] { if e.FindingToEnvironment == nil { - // The edge FindingToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.FindingToEnvironment, nil @@ -116,8 +120,8 @@ func (e FindingEdges) FindingToEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Finding) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Finding) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case finding.FieldTags: @@ -133,7 +137,7 @@ func (*Finding) scanValues(columns []string) ([]interface{}, error) { case finding.ForeignKeys[2]: // script_script_to_finding values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Finding", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -141,7 +145,7 @@ func (*Finding) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Finding fields. -func (f *Finding) assignValues(columns []string, values []interface{}) error { +func (f *Finding) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -206,46 +210,54 @@ func (f *Finding) assignValues(columns []string, values []interface{}) error { f.script_script_to_finding = new(uuid.UUID) *f.script_script_to_finding = *value.S.(*uuid.UUID) } + default: + f.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Finding. +// This includes values selected through modifiers, order, etc. +func (f *Finding) Value(name string) (ent.Value, error) { + return f.selectValues.Get(name) +} + // QueryFindingToUser queries the "FindingToUser" edge of the Finding entity. func (f *Finding) QueryFindingToUser() *UserQuery { - return (&FindingClient{config: f.config}).QueryFindingToUser(f) + return NewFindingClient(f.config).QueryFindingToUser(f) } // QueryFindingToHost queries the "FindingToHost" edge of the Finding entity. func (f *Finding) QueryFindingToHost() *HostQuery { - return (&FindingClient{config: f.config}).QueryFindingToHost(f) + return NewFindingClient(f.config).QueryFindingToHost(f) } // QueryFindingToScript queries the "FindingToScript" edge of the Finding entity. func (f *Finding) QueryFindingToScript() *ScriptQuery { - return (&FindingClient{config: f.config}).QueryFindingToScript(f) + return NewFindingClient(f.config).QueryFindingToScript(f) } // QueryFindingToEnvironment queries the "FindingToEnvironment" edge of the Finding entity. func (f *Finding) QueryFindingToEnvironment() *EnvironmentQuery { - return (&FindingClient{config: f.config}).QueryFindingToEnvironment(f) + return NewFindingClient(f.config).QueryFindingToEnvironment(f) } // Update returns a builder for updating this Finding. 
// Note that you need to call Finding.Unwrap() before calling this method if this Finding // was returned from a transaction, and the transaction was committed or rolled back. func (f *Finding) Update() *FindingUpdateOne { - return (&FindingClient{config: f.config}).UpdateOne(f) + return NewFindingClient(f.config).UpdateOne(f) } // Unwrap unwraps the Finding entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (f *Finding) Unwrap() *Finding { - tx, ok := f.config.driver.(*txDriver) + _tx, ok := f.config.driver.(*txDriver) if !ok { panic("ent: Finding is not a transactional entity") } - f.config.driver = tx.drv + f.config.driver = _tx.drv return f } @@ -253,26 +265,48 @@ func (f *Finding) Unwrap() *Finding { func (f *Finding) String() string { var builder strings.Builder builder.WriteString("Finding(") - builder.WriteString(fmt.Sprintf("id=%v", f.ID)) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", f.ID)) + builder.WriteString("name=") builder.WriteString(f.Name) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(f.Description) - builder.WriteString(", severity=") + builder.WriteString(", ") + builder.WriteString("severity=") builder.WriteString(fmt.Sprintf("%v", f.Severity)) - builder.WriteString(", difficulty=") + builder.WriteString(", ") + builder.WriteString("difficulty=") builder.WriteString(fmt.Sprintf("%v", f.Difficulty)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", f.Tags)) builder.WriteByte(')') return builder.String() } -// Findings is a parsable slice of Finding. -type Findings []*Finding +// NamedFindingToUser returns the FindingToUser named value or an error if the edge was not +// loaded in eager-loading with this name. +func (f *Finding) NamedFindingToUser(name string) ([]*User, error) { + if f.Edges.namedFindingToUser == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := f.Edges.namedFindingToUser[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (f Findings) config(cfg config) { - for _i := range f { - f[_i].config = cfg +func (f *Finding) appendNamedFindingToUser(name string, edges ...*User) { + if f.Edges.namedFindingToUser == nil { + f.Edges.namedFindingToUser = make(map[string][]*User) + } + if len(edges) == 0 { + f.Edges.namedFindingToUser[name] = []*User{} + } else { + f.Edges.namedFindingToUser[name] = append(f.Edges.namedFindingToUser[name], edges...) } } + +// Findings is a parsable slice of Finding. +type Findings []*Finding diff --git a/ent/finding/finding.go b/ent/finding/finding.go index 20dd5799..0dac7fbc 100755 --- a/ent/finding/finding.go +++ b/ent/finding/finding.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package finding @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -156,37 +158,128 @@ func DifficultyValidator(d Difficulty) error { } } +// OrderOption defines the ordering options for the Finding queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. 
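// Sketch of the new named-edge helpers (NamedFindingToUser above). The matching
// WithNamedFindingToUser loader on FindingQuery belongs to the same generated
// feature set but is not shown in this hunk, so treat its name as an assumption;
// finding.IDEQ and Only are taken from this diff and the standard query surface.
func findingMaintainers(ctx context.Context, client *ent.Client, id uuid.UUID) ([]*ent.User, error) {
	f, err := client.Finding.Query().
		Where(finding.IDEQ(id)).
		WithNamedFindingToUser("maintainers").
		Only(ctx)
	if err != nil {
		return nil, err
	}
	// Read the eager-loaded edge back under the same name it was loaded with.
	return f.NamedFindingToUser("maintainers")
}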
+func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// BySeverity orders the results by the severity field. +func BySeverity(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSeverity, opts...).ToFunc() +} + +// ByDifficulty orders the results by the difficulty field. +func ByDifficulty(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDifficulty, opts...).ToFunc() +} + +// ByFindingToUserCount orders the results by FindingToUser count. +func ByFindingToUserCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newFindingToUserStep(), opts...) + } +} + +// ByFindingToUser orders the results by FindingToUser terms. +func ByFindingToUser(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFindingToUserStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByFindingToHostField orders the results by FindingToHost field. +func ByFindingToHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFindingToHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByFindingToScriptField orders the results by FindingToScript field. +func ByFindingToScriptField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFindingToScriptStep(), sql.OrderByField(field, opts...)) + } +} + +// ByFindingToEnvironmentField orders the results by FindingToEnvironment field. +func ByFindingToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFindingToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newFindingToUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FindingToUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, FindingToUserTable, FindingToUserColumn), + ) +} +func newFindingToHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FindingToHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FindingToHostTable, FindingToHostColumn), + ) +} +func newFindingToScriptStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FindingToScriptInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, FindingToScriptTable, FindingToScriptColumn), + ) +} +func newFindingToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FindingToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, FindingToEnvironmentTable, FindingToEnvironmentColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (s Severity) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(s.String())) +func (e Severity) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. 
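// Sketch of the new typed ordering options defined above. sql.OrderDesc comes from
// entgo.io/ent/dialect/sql; a regenerated FindingQuery.Order that accepts these
// finding.OrderOption values is assumed here (it is generated alongside the helpers
// but not shown in this hunk).
func worstFindingsFirst(ctx context.Context, client *ent.Client) ([]*ent.Finding, error) {
	return client.Finding.Query().
		Order(
			finding.BySeverity(sql.OrderDesc()),
			finding.ByFindingToUserCount(sql.OrderDesc()),
		).
		All(ctx)
}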
-func (s *Severity) UnmarshalGQL(val interface{}) error { +func (e *Severity) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *s = Severity(str) - if err := SeverityValidator(*s); err != nil { + *e = Severity(str) + if err := SeverityValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Severity", str) } return nil } // MarshalGQL implements graphql.Marshaler interface. -func (d Difficulty) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(d.String())) +func (e Difficulty) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (d *Difficulty) UnmarshalGQL(val interface{}) error { +func (e *Difficulty) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *d = Difficulty(str) - if err := DifficultyValidator(*d); err != nil { + *e = Difficulty(str) + if err := DifficultyValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Difficulty", str) } return nil diff --git a/ent/finding/where.go b/ent/finding/where.go index 103fa818..5de2211f 100755 --- a/ent/finding/where.go +++ b/ent/finding/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package finding @@ -11,417 +11,227 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Finding(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Finding(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. 
func IDGTE(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Finding(sql.FieldLTE(FieldID, id)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldEQ(FieldName, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. func Description(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldEQ(FieldDescription, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Finding(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Finding(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. 
func NameLT(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Finding(sql.FieldContainsFold(FieldName, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. func DescriptionEQ(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. func DescriptionIn(vs ...string) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Finding(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. func DescriptionNotIn(vs ...string) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Finding(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. func DescriptionHasSuffix(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. func DescriptionEqualFold(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. func DescriptionContainsFold(v string) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Finding(sql.FieldContainsFold(FieldDescription, v)) } // SeverityEQ applies the EQ predicate on the "severity" field. func SeverityEQ(v Severity) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSeverity), v)) - }) + return predicate.Finding(sql.FieldEQ(FieldSeverity, v)) } // SeverityNEQ applies the NEQ predicate on the "severity" field. func SeverityNEQ(v Severity) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSeverity), v)) - }) + return predicate.Finding(sql.FieldNEQ(FieldSeverity, v)) } // SeverityIn applies the In predicate on the "severity" field. 
func SeverityIn(vs ...Severity) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSeverity), v...)) - }) + return predicate.Finding(sql.FieldIn(FieldSeverity, vs...)) } // SeverityNotIn applies the NotIn predicate on the "severity" field. func SeverityNotIn(vs ...Severity) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSeverity), v...)) - }) + return predicate.Finding(sql.FieldNotIn(FieldSeverity, vs...)) } // DifficultyEQ applies the EQ predicate on the "difficulty" field. func DifficultyEQ(v Difficulty) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDifficulty), v)) - }) + return predicate.Finding(sql.FieldEQ(FieldDifficulty, v)) } // DifficultyNEQ applies the NEQ predicate on the "difficulty" field. func DifficultyNEQ(v Difficulty) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDifficulty), v)) - }) + return predicate.Finding(sql.FieldNEQ(FieldDifficulty, v)) } // DifficultyIn applies the In predicate on the "difficulty" field. func DifficultyIn(vs ...Difficulty) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDifficulty), v...)) - }) + return predicate.Finding(sql.FieldIn(FieldDifficulty, vs...)) } // DifficultyNotIn applies the NotIn predicate on the "difficulty" field. func DifficultyNotIn(vs ...Difficulty) predicate.Finding { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Finding(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDifficulty), v...)) - }) + return predicate.Finding(sql.FieldNotIn(FieldDifficulty, vs...)) } // HasFindingToUser applies the HasEdge predicate on the "FindingToUser" edge. @@ -429,7 +239,6 @@ func HasFindingToUser() predicate.Finding { return predicate.Finding(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToUserTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, FindingToUserTable, FindingToUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -439,11 +248,7 @@ func HasFindingToUser() predicate.Finding { // HasFindingToUserWith applies the HasEdge predicate on the "FindingToUser" edge with a given conditions (other predicates). 
func HasFindingToUserWith(preds ...predicate.User) predicate.Finding { return predicate.Finding(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, FindingToUserTable, FindingToUserColumn), - ) + step := newFindingToUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -457,7 +262,6 @@ func HasFindingToHost() predicate.Finding { return predicate.Finding(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, FindingToHostTable, FindingToHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -467,11 +271,7 @@ func HasFindingToHost() predicate.Finding { // HasFindingToHostWith applies the HasEdge predicate on the "FindingToHost" edge with a given conditions (other predicates). func HasFindingToHostWith(preds ...predicate.Host) predicate.Finding { return predicate.Finding(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, FindingToHostTable, FindingToHostColumn), - ) + step := newFindingToHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -485,7 +285,6 @@ func HasFindingToScript() predicate.Finding { return predicate.Finding(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToScriptTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, FindingToScriptTable, FindingToScriptColumn), ) sqlgraph.HasNeighbors(s, step) @@ -495,11 +294,7 @@ func HasFindingToScript() predicate.Finding { // HasFindingToScriptWith applies the HasEdge predicate on the "FindingToScript" edge with a given conditions (other predicates). func HasFindingToScriptWith(preds ...predicate.Script) predicate.Finding { return predicate.Finding(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToScriptInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, FindingToScriptTable, FindingToScriptColumn), - ) + step := newFindingToScriptStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -513,7 +308,6 @@ func HasFindingToEnvironment() predicate.Finding { return predicate.Finding(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, FindingToEnvironmentTable, FindingToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -523,11 +317,7 @@ func HasFindingToEnvironment() predicate.Finding { // HasFindingToEnvironmentWith applies the HasEdge predicate on the "FindingToEnvironment" edge with a given conditions (other predicates). 
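// Sketch of the flattened predicate helpers in this file (including the And/Or/Not
// rewrite just below): they now delegate to the sql.Field* and sql.*Predicates
// helpers instead of hand-rolled selectors, but call sites are unchanged.
// "client"/"ctx" are as in the earlier sketches.
func searchFindings(ctx context.Context, client *ent.Client, needle string) ([]*ent.Finding, error) {
	return client.Finding.Query().
		Where(
			finding.Or(
				finding.NameContainsFold(needle),
				finding.DescriptionContainsFold(needle),
			),
			finding.HasFindingToHost(), // only findings already attached to a host
		).
		All(ctx)
}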
func HasFindingToEnvironmentWith(preds ...predicate.Environment) predicate.Finding { return predicate.Finding(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FindingToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, FindingToEnvironmentTable, FindingToEnvironmentColumn), - ) + step := newFindingToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -538,32 +328,15 @@ func HasFindingToEnvironmentWith(preds ...predicate.Environment) predicate.Findi // And groups predicates with the AND operator between them. func And(predicates ...predicate.Finding) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Finding(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Finding) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Finding(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Finding) predicate.Finding { - return predicate.Finding(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Finding(sql.NotPredicates(p)) } diff --git a/ent/finding_create.go b/ent/finding_create.go index dbf145c3..b725343f 100755 --- a/ent/finding_create.go +++ b/ent/finding_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -147,44 +147,8 @@ func (fc *FindingCreate) Mutation() *FindingMutation { // Save creates the Finding in the database. func (fc *FindingCreate) Save(ctx context.Context) (*Finding, error) { - var ( - err error - node *Finding - ) fc.defaults() - if len(fc.hooks) == 0 { - if err = fc.check(); err != nil { - return nil, err - } - node, err = fc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FindingMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = fc.check(); err != nil { - return nil, err - } - fc.mutation = mutation - if node, err = fc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(fc.hooks) - 1; i >= 0; i-- { - if fc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fc.sqlSave, fc.mutation, fc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -248,10 +212,13 @@ func (fc *FindingCreate) check() error { } func (fc *FindingCreate) sqlSave(ctx context.Context) (*Finding, error) { + if err := fc.check(); err != nil { + return nil, err + } _node, _spec := fc.createSpec() if err := sqlgraph.CreateNode(ctx, fc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -262,62 +229,38 @@ func (fc *FindingCreate) sqlSave(ctx context.Context) (*Finding, error) { return nil, err } } + fc.mutation.id = &_node.ID + fc.mutation.done = true return _node, nil } func (fc *FindingCreate) createSpec() (*Finding, *sqlgraph.CreateSpec) { var ( _node = &Finding{config: fc.config} - _spec = &sqlgraph.CreateSpec{ - Table: finding.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(finding.Table, sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID)) ) if id, ok := fc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := fc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: finding.FieldName, - }) + _spec.SetField(finding.FieldName, field.TypeString, value) _node.Name = value } if value, ok := fc.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: finding.FieldDescription, - }) + _spec.SetField(finding.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := fc.mutation.Severity(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: finding.FieldSeverity, - }) + _spec.SetField(finding.FieldSeverity, field.TypeEnum, value) _node.Severity = value } if value, ok := fc.mutation.Difficulty(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: finding.FieldDifficulty, - }) + _spec.SetField(finding.FieldDifficulty, field.TypeEnum, value) _node.Difficulty = value } if value, ok := fc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: finding.FieldTags, - }) + _spec.SetField(finding.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := fc.mutation.FindingToUserIDs(); len(nodes) > 0 { @@ -328,10 +271,7 @@ func (fc *FindingCreate) createSpec() (*Finding, *sqlgraph.CreateSpec) { Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -347,10 +287,7 @@ func (fc *FindingCreate) createSpec() (*Finding, *sqlgraph.CreateSpec) { Columns: []string{finding.FindingToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -367,10 +304,7 @@ func (fc *FindingCreate) createSpec() (*Finding, *sqlgraph.CreateSpec) { Columns: []string{finding.FindingToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } 
for _, k := range nodes { @@ -387,10 +321,7 @@ func (fc *FindingCreate) createSpec() (*Finding, *sqlgraph.CreateSpec) { Columns: []string{finding.FindingToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -405,11 +336,15 @@ func (fc *FindingCreate) createSpec() (*Finding, *sqlgraph.CreateSpec) { // FindingCreateBulk is the builder for creating many Finding entities in bulk. type FindingCreateBulk struct { config + err error builders []*FindingCreate } // Save creates the Finding entities in the database. func (fcb *FindingCreateBulk) Save(ctx context.Context) ([]*Finding, error) { + if fcb.err != nil { + return nil, fcb.err + } specs := make([]*sqlgraph.CreateSpec, len(fcb.builders)) nodes := make([]*Finding, len(fcb.builders)) mutators := make([]Mutator, len(fcb.builders)) @@ -426,8 +361,8 @@ func (fcb *FindingCreateBulk) Save(ctx context.Context) ([]*Finding, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, fcb.builders[i+1].mutation) } else { @@ -435,7 +370,7 @@ func (fcb *FindingCreateBulk) Save(ctx context.Context) ([]*Finding, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, fcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/finding_delete.go b/ent/finding_delete.go index 0c3e57ac..cf087e88 100755 --- a/ent/finding_delete.go +++ b/ent/finding_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (fd *FindingDelete) Where(ps ...predicate.Finding) *FindingDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (fd *FindingDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fd.hooks) == 0 { - affected, err = fd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FindingMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - fd.mutation = mutation - affected, err = fd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(fd.hooks) - 1; i >= 0; i-- { - if fd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fd.sqlExec, fd.mutation, fd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
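Usage sketch (not part of the diff): FindingDelete.Exec above likewise collapses to a single withHooks call, and the sqlExec hunk that follows wraps constraint violations in *ConstraintError before reporting the affected row count. The snippet assumes client.Finding.Delete() from stock ent codegen; the predicates and the (int, error) Exec signature are taken from this diff.

package main

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/finding"
)

// pruneOrphanFindings deletes findings that no longer have an environment edge
// and returns the number of rows removed (the int result of Exec above).
// NOTE: client.Finding.Delete() is assumed from stock ent codegen; the
// predicates and Exec signature come from this diff.
func pruneOrphanFindings(ctx context.Context, client *ent.Client) (int, error) {
	return client.Finding.Delete().
		Where(finding.Not(finding.HasFindingToEnvironment())).
		Exec(ctx)
}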
@@ -68,15 +40,7 @@ func (fd *FindingDelete) ExecX(ctx context.Context) int { } func (fd *FindingDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: finding.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(finding.Table, sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID)) if ps := fd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (fd *FindingDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, fd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, fd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + fd.mutation.done = true + return affected, err } // FindingDeleteOne is the builder for deleting a single Finding entity. @@ -92,6 +61,12 @@ type FindingDeleteOne struct { fd *FindingDelete } +// Where appends a list predicates to the FindingDelete builder. +func (fdo *FindingDeleteOne) Where(ps ...predicate.Finding) *FindingDeleteOne { + fdo.fd.mutation.Where(ps...) + return fdo +} + // Exec executes the deletion query. func (fdo *FindingDeleteOne) Exec(ctx context.Context) error { n, err := fdo.fd.Exec(ctx) @@ -107,5 +82,7 @@ func (fdo *FindingDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (fdo *FindingDeleteOne) ExecX(ctx context.Context) { - fdo.fd.ExecX(ctx) + if err := fdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/finding_query.go b/ent/finding_query.go index e6bd1df1..8a983716 100755 --- a/ent/finding_query.go +++ b/ent/finding_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -24,18 +23,18 @@ import ( // FindingQuery is the builder for querying Finding entities. type FindingQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Finding - // eager-loading edges. + ctx *QueryContext + order []finding.OrderOption + inters []Interceptor + predicates []predicate.Finding withFindingToUser *UserQuery withFindingToHost *HostQuery withFindingToScript *ScriptQuery withFindingToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Finding) error + withNamedFindingToUser map[string]*UserQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -47,34 +46,34 @@ func (fq *FindingQuery) Where(ps ...predicate.Finding) *FindingQuery { return fq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (fq *FindingQuery) Limit(limit int) *FindingQuery { - fq.limit = &limit + fq.ctx.Limit = &limit return fq } -// Offset adds an offset step to the query. +// Offset to start from. func (fq *FindingQuery) Offset(offset int) *FindingQuery { - fq.offset = &offset + fq.ctx.Offset = &offset return fq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
func (fq *FindingQuery) Unique(unique bool) *FindingQuery { - fq.unique = &unique + fq.ctx.Unique = &unique return fq } -// Order adds an order step to the query. -func (fq *FindingQuery) Order(o ...OrderFunc) *FindingQuery { +// Order specifies how the records should be ordered. +func (fq *FindingQuery) Order(o ...finding.OrderOption) *FindingQuery { fq.order = append(fq.order, o...) return fq } // QueryFindingToUser chains the current query on the "FindingToUser" edge. func (fq *FindingQuery) QueryFindingToUser() *UserQuery { - query := &UserQuery{config: fq.config} + query := (&UserClient{config: fq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := fq.prepareQuery(ctx); err != nil { return nil, err @@ -96,7 +95,7 @@ func (fq *FindingQuery) QueryFindingToUser() *UserQuery { // QueryFindingToHost chains the current query on the "FindingToHost" edge. func (fq *FindingQuery) QueryFindingToHost() *HostQuery { - query := &HostQuery{config: fq.config} + query := (&HostClient{config: fq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := fq.prepareQuery(ctx); err != nil { return nil, err @@ -118,7 +117,7 @@ func (fq *FindingQuery) QueryFindingToHost() *HostQuery { // QueryFindingToScript chains the current query on the "FindingToScript" edge. func (fq *FindingQuery) QueryFindingToScript() *ScriptQuery { - query := &ScriptQuery{config: fq.config} + query := (&ScriptClient{config: fq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := fq.prepareQuery(ctx); err != nil { return nil, err @@ -140,7 +139,7 @@ func (fq *FindingQuery) QueryFindingToScript() *ScriptQuery { // QueryFindingToEnvironment chains the current query on the "FindingToEnvironment" edge. func (fq *FindingQuery) QueryFindingToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: fq.config} + query := (&EnvironmentClient{config: fq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := fq.prepareQuery(ctx); err != nil { return nil, err @@ -163,7 +162,7 @@ func (fq *FindingQuery) QueryFindingToEnvironment() *EnvironmentQuery { // First returns the first Finding entity from the query. // Returns a *NotFoundError when no Finding was found. func (fq *FindingQuery) First(ctx context.Context) (*Finding, error) { - nodes, err := fq.Limit(1).All(ctx) + nodes, err := fq.Limit(1).All(setContextOp(ctx, fq.ctx, "First")) if err != nil { return nil, err } @@ -186,7 +185,7 @@ func (fq *FindingQuery) FirstX(ctx context.Context) *Finding { // Returns a *NotFoundError when no Finding ID was found. func (fq *FindingQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = fq.Limit(1).IDs(ctx); err != nil { + if ids, err = fq.Limit(1).IDs(setContextOp(ctx, fq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -209,7 +208,7 @@ func (fq *FindingQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Finding entity is found. // Returns a *NotFoundError when no Finding entities are found. func (fq *FindingQuery) Only(ctx context.Context) (*Finding, error) { - nodes, err := fq.Limit(2).All(ctx) + nodes, err := fq.Limit(2).All(setContextOp(ctx, fq.ctx, "Only")) if err != nil { return nil, err } @@ -237,7 +236,7 @@ func (fq *FindingQuery) OnlyX(ctx context.Context) *Finding { // Returns a *NotFoundError when no entities are found. 
func (fq *FindingQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = fq.Limit(2).IDs(ctx); err != nil { + if ids, err = fq.Limit(2).IDs(setContextOp(ctx, fq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -262,10 +261,12 @@ func (fq *FindingQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Findings. func (fq *FindingQuery) All(ctx context.Context) ([]*Finding, error) { + ctx = setContextOp(ctx, fq.ctx, "All") if err := fq.prepareQuery(ctx); err != nil { return nil, err } - return fq.sqlAll(ctx) + qr := querierAll[[]*Finding, *FindingQuery]() + return withInterceptors[[]*Finding](ctx, fq, qr, fq.inters) } // AllX is like All, but panics if an error occurs. @@ -278,9 +279,12 @@ func (fq *FindingQuery) AllX(ctx context.Context) []*Finding { } // IDs executes the query and returns a list of Finding IDs. -func (fq *FindingQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := fq.Select(finding.FieldID).Scan(ctx, &ids); err != nil { +func (fq *FindingQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if fq.ctx.Unique == nil && fq.path != nil { + fq.Unique(true) + } + ctx = setContextOp(ctx, fq.ctx, "IDs") + if err = fq.Select(finding.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -297,10 +301,11 @@ func (fq *FindingQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (fq *FindingQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, fq.ctx, "Count") if err := fq.prepareQuery(ctx); err != nil { return 0, err } - return fq.sqlCount(ctx) + return withInterceptors[int](ctx, fq, querierCount[*FindingQuery](), fq.inters) } // CountX is like Count, but panics if an error occurs. @@ -314,10 +319,15 @@ func (fq *FindingQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (fq *FindingQuery) Exist(ctx context.Context) (bool, error) { - if err := fq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, fq.ctx, "Exist") + switch _, err := fq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return fq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -337,25 +347,24 @@ func (fq *FindingQuery) Clone() *FindingQuery { } return &FindingQuery{ config: fq.config, - limit: fq.limit, - offset: fq.offset, - order: append([]OrderFunc{}, fq.order...), + ctx: fq.ctx.Clone(), + order: append([]finding.OrderOption{}, fq.order...), + inters: append([]Interceptor{}, fq.inters...), predicates: append([]predicate.Finding{}, fq.predicates...), withFindingToUser: fq.withFindingToUser.Clone(), withFindingToHost: fq.withFindingToHost.Clone(), withFindingToScript: fq.withFindingToScript.Clone(), withFindingToEnvironment: fq.withFindingToEnvironment.Clone(), // clone intermediate query. - sql: fq.sql.Clone(), - path: fq.path, - unique: fq.unique, + sql: fq.sql.Clone(), + path: fq.path, } } // WithFindingToUser tells the query-builder to eager-load the nodes that are connected to // the "FindingToUser" edge. The optional arguments are used to configure the query builder of the edge. 
func (fq *FindingQuery) WithFindingToUser(opts ...func(*UserQuery)) *FindingQuery { - query := &UserQuery{config: fq.config} + query := (&UserClient{config: fq.config}).Query() for _, opt := range opts { opt(query) } @@ -366,7 +375,7 @@ func (fq *FindingQuery) WithFindingToUser(opts ...func(*UserQuery)) *FindingQuer // WithFindingToHost tells the query-builder to eager-load the nodes that are connected to // the "FindingToHost" edge. The optional arguments are used to configure the query builder of the edge. func (fq *FindingQuery) WithFindingToHost(opts ...func(*HostQuery)) *FindingQuery { - query := &HostQuery{config: fq.config} + query := (&HostClient{config: fq.config}).Query() for _, opt := range opts { opt(query) } @@ -377,7 +386,7 @@ func (fq *FindingQuery) WithFindingToHost(opts ...func(*HostQuery)) *FindingQuer // WithFindingToScript tells the query-builder to eager-load the nodes that are connected to // the "FindingToScript" edge. The optional arguments are used to configure the query builder of the edge. func (fq *FindingQuery) WithFindingToScript(opts ...func(*ScriptQuery)) *FindingQuery { - query := &ScriptQuery{config: fq.config} + query := (&ScriptClient{config: fq.config}).Query() for _, opt := range opts { opt(query) } @@ -388,7 +397,7 @@ func (fq *FindingQuery) WithFindingToScript(opts ...func(*ScriptQuery)) *Finding // WithFindingToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "FindingToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (fq *FindingQuery) WithFindingToEnvironment(opts ...func(*EnvironmentQuery)) *FindingQuery { - query := &EnvironmentQuery{config: fq.config} + query := (&EnvironmentClient{config: fq.config}).Query() for _, opt := range opts { opt(query) } @@ -410,17 +419,13 @@ func (fq *FindingQuery) WithFindingToEnvironment(opts ...func(*EnvironmentQuery) // GroupBy(finding.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (fq *FindingQuery) GroupBy(field string, fields ...string) *FindingGroupBy { - group := &FindingGroupBy{config: fq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := fq.prepareQuery(ctx); err != nil { - return nil, err - } - return fq.sqlQuery(ctx), nil - } - return group + fq.ctx.Fields = append([]string{field}, fields...) + grbuild := &FindingGroupBy{build: fq} + grbuild.flds = &fq.ctx.Fields + grbuild.label = finding.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -435,14 +440,31 @@ func (fq *FindingQuery) GroupBy(field string, fields ...string) *FindingGroupBy // client.Finding.Query(). // Select(finding.FieldName). // Scan(ctx, &v) -// func (fq *FindingQuery) Select(fields ...string) *FindingSelect { - fq.fields = append(fq.fields, fields...) - return &FindingSelect{FindingQuery: fq} + fq.ctx.Fields = append(fq.ctx.Fields, fields...) + sbuild := &FindingSelect{FindingQuery: fq} + sbuild.label = finding.Label + sbuild.flds, sbuild.scan = &fq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a FindingSelect configured with the given aggregations. +func (fq *FindingQuery) Aggregate(fns ...AggregateFunc) *FindingSelect { + return fq.Select().Aggregate(fns...) 
} func (fq *FindingQuery) prepareQuery(ctx context.Context) error { - for _, f := range fq.fields { + for _, inter := range fq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, fq); err != nil { + return err + } + } + } + for _, f := range fq.ctx.Fields { if !finding.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -457,7 +479,7 @@ func (fq *FindingQuery) prepareQuery(ctx context.Context) error { return nil } -func (fq *FindingQuery) sqlAll(ctx context.Context) ([]*Finding, error) { +func (fq *FindingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Finding, error) { var ( nodes = []*Finding{} withFKs = fq.withFKs @@ -475,179 +497,216 @@ func (fq *FindingQuery) sqlAll(ctx context.Context) ([]*Finding, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, finding.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Finding).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Finding{config: fq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(fq.modifiers) > 0 { + _spec.Modifiers = fq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, fq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := fq.withFindingToUser; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Finding) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.FindingToUser = []*User{} + if err := fq.loadFindingToUser(ctx, query, nodes, + func(n *Finding) { n.Edges.FindingToUser = []*User{} }, + func(n *Finding, e *User) { n.Edges.FindingToUser = append(n.Edges.FindingToUser, e) }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.User(func(s *sql.Selector) { - s.Where(sql.InValues(finding.FindingToUserColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := fq.withFindingToHost; query != nil { + if err := fq.loadFindingToHost(ctx, query, nodes, nil, + func(n *Finding, e *Host) { n.Edges.FindingToHost = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.finding_finding_to_user - if fk == nil { - return nil, fmt.Errorf(`foreign-key "finding_finding_to_user" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "finding_finding_to_user" returned %v for node %v`, *fk, n.ID) - } - node.Edges.FindingToUser = append(node.Edges.FindingToUser, n) + } + if query := fq.withFindingToScript; query != nil { + if err := fq.loadFindingToScript(ctx, query, nodes, nil, + func(n *Finding, e *Script) { n.Edges.FindingToScript = e }); err != nil { + return nil, err } } - - if query := fq.withFindingToHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Finding) - for i := range 
nodes { - if nodes[i].finding_finding_to_host == nil { - continue - } - fk := *nodes[i].finding_finding_to_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := fq.withFindingToEnvironment; query != nil { + if err := fq.loadFindingToEnvironment(ctx, query, nodes, nil, + func(n *Finding, e *Environment) { n.Edges.FindingToEnvironment = e }); err != nil { + return nil, err } - query.Where(host.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range fq.withNamedFindingToUser { + if err := fq.loadFindingToUser(ctx, query, nodes, + func(n *Finding) { n.appendNamedFindingToUser(name) }, + func(n *Finding, e *User) { n.appendNamedFindingToUser(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "finding_finding_to_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.FindingToHost = n - } + } + for i := range fq.loadTotal { + if err := fq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := fq.withFindingToScript; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Finding) +func (fq *FindingQuery) loadFindingToUser(ctx context.Context, query *UserQuery, nodes []*Finding, init func(*Finding), assign func(*Finding, *User)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Finding) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.User(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(finding.FindingToUserColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.finding_finding_to_user + if fk == nil { + return fmt.Errorf(`foreign-key "finding_finding_to_user" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "finding_finding_to_user" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (fq *FindingQuery) loadFindingToHost(ctx context.Context, query *HostQuery, nodes []*Finding, init func(*Finding), assign func(*Finding, *Host)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Finding) + for i := range nodes { + if nodes[i].finding_finding_to_host == nil { + continue + } + fk := *nodes[i].finding_finding_to_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(host.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "finding_finding_to_host" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].script_script_to_finding == nil { - continue - } - fk := *nodes[i].script_script_to_finding - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(script.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (fq *FindingQuery) loadFindingToScript(ctx context.Context, query *ScriptQuery, nodes 
[]*Finding, init func(*Finding), assign func(*Finding, *Script)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Finding) + for i := range nodes { + if nodes[i].script_script_to_finding == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "script_script_to_finding" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.FindingToScript = n - } + fk := *nodes[i].script_script_to_finding + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := fq.withFindingToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Finding) + if len(ids) == 0 { + return nil + } + query.Where(script.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "script_script_to_finding" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].environment_environment_to_finding == nil { - continue - } - fk := *nodes[i].environment_environment_to_finding - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (fq *FindingQuery) loadFindingToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Finding, init func(*Finding), assign func(*Finding, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Finding) + for i := range nodes { + if nodes[i].environment_environment_to_finding == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_finding" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.FindingToEnvironment = n - } + fk := *nodes[i].environment_environment_to_finding + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - return nodes, nil + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_finding" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (fq *FindingQuery) sqlCount(ctx context.Context) (int, error) { _spec := fq.querySpec() - _spec.Node.Columns = fq.fields - if len(fq.fields) > 0 { - _spec.Unique = fq.unique != nil && *fq.unique + if len(fq.modifiers) > 0 { + _spec.Modifiers = fq.modifiers } - return sqlgraph.CountNodes(ctx, fq.driver, _spec) -} - -func (fq *FindingQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := fq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = fq.ctx.Fields + if len(fq.ctx.Fields) > 0 { + _spec.Unique = fq.ctx.Unique != nil && *fq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, fq.driver, _spec) } func (fq *FindingQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: finding.Table, - Columns: 
finding.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, - }, - From: fq.sql, - Unique: true, - } - if unique := fq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(finding.Table, finding.Columns, sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID)) + _spec.From = fq.sql + if unique := fq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if fq.path != nil { + _spec.Unique = true } - if fields := fq.fields; len(fields) > 0 { + if fields := fq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, finding.FieldID) for i := range fields { @@ -663,10 +722,10 @@ func (fq *FindingQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := fq.limit; limit != nil { + if limit := fq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := fq.offset; offset != nil { + if offset := fq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := fq.order; len(ps) > 0 { @@ -682,7 +741,7 @@ func (fq *FindingQuery) querySpec() *sqlgraph.QuerySpec { func (fq *FindingQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(fq.driver.Dialect()) t1 := builder.Table(finding.Table) - columns := fq.fields + columns := fq.ctx.Fields if len(columns) == 0 { columns = finding.Columns } @@ -691,7 +750,7 @@ func (fq *FindingQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = fq.sql selector.Select(selector.Columns(columns...)...) } - if fq.unique != nil && *fq.unique { + if fq.ctx.Unique != nil && *fq.ctx.Unique { selector.Distinct() } for _, p := range fq.predicates { @@ -700,25 +759,35 @@ func (fq *FindingQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range fq.order { p(selector) } - if offset := fq.offset; offset != nil { + if offset := fq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := fq.limit; limit != nil { + if limit := fq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedFindingToUser tells the query-builder to eager-load the nodes that are connected to the "FindingToUser" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (fq *FindingQuery) WithNamedFindingToUser(name string, opts ...func(*UserQuery)) *FindingQuery { + query := (&UserClient{config: fq.config}).Query() + for _, opt := range opts { + opt(query) + } + if fq.withNamedFindingToUser == nil { + fq.withNamedFindingToUser = make(map[string]*UserQuery) + } + fq.withNamedFindingToUser[name] = query + return fq +} + // FindingGroupBy is the group-by builder for Finding entities. type FindingGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *FindingQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -727,471 +796,77 @@ func (fgb *FindingGroupBy) Aggregate(fns ...AggregateFunc) *FindingGroupBy { return fgb } -// Scan applies the group-by query and scans the result into the given value. -func (fgb *FindingGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := fgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. 
+func (fgb *FindingGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fgb.build.ctx, "GroupBy") + if err := fgb.build.prepareQuery(ctx); err != nil { return err } - fgb.sql = query - return fgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fgb *FindingGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := fgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(fgb.fields) > 1 { - return nil, errors.New("ent: FindingGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := fgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (fgb *FindingGroupBy) StringsX(ctx context.Context) []string { - v, err := fgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fgb *FindingGroupBy) StringX(ctx context.Context) string { - v, err := fgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(fgb.fields) > 1 { - return nil, errors.New("ent: FindingGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := fgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fgb *FindingGroupBy) IntsX(ctx context.Context) []int { - v, err := fgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (fgb *FindingGroupBy) IntX(ctx context.Context) int { - v, err := fgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (fgb *FindingGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(fgb.fields) > 1 { - return nil, errors.New("ent: FindingGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := fgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fgb *FindingGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := fgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*FindingQuery, *FindingGroupBy](ctx, fgb.build, fgb, fgb.build.inters, v) } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fgb *FindingGroupBy) Float64X(ctx context.Context) float64 { - v, err := fgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(fgb.fields) > 1 { - return nil, errors.New("ent: FindingGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := fgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fgb *FindingGroupBy) BoolsX(ctx context.Context) []bool { - v, err := fgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (fgb *FindingGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fgb *FindingGroupBy) BoolX(ctx context.Context) bool { - v, err := fgb.Bool(ctx) - if err != nil { - panic(err) +func (fgb *FindingGroupBy) sqlScan(ctx context.Context, root *FindingQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(fgb.fns)) + for _, fn := range fgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (fgb *FindingGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range fgb.fields { - if !finding.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*fgb.flds)+len(fgb.fns)) + for _, f := range *fgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := fgb.sqlQuery() + selector.GroupBy(selector.Columns(*fgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := fgb.driver.Query(ctx, query, args, rows); err != nil { + if err := fgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (fgb *FindingGroupBy) sqlQuery() *sql.Selector { - selector := fgb.sql.Select() - aggregation := make([]string, 0, len(fgb.fns)) - for _, fn := range fgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(fgb.fields)+len(fgb.fns)) - for _, f := range fgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(fgb.fields...)...) -} - // FindingSelect is the builder for selecting fields of Finding entities. type FindingSelect struct { *FindingQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (fs *FindingSelect) Aggregate(fns ...AggregateFunc) *FindingSelect { + fs.fns = append(fs.fns, fns...) + return fs } // Scan applies the selector query and scans the result into the given value. -func (fs *FindingSelect) Scan(ctx context.Context, v interface{}) error { +func (fs *FindingSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fs.ctx, "Select") if err := fs.prepareQuery(ctx); err != nil { return err } - fs.sql = fs.FindingQuery.sqlQuery(ctx) - return fs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (fs *FindingSelect) ScanX(ctx context.Context, v interface{}) { - if err := fs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (fs *FindingSelect) Strings(ctx context.Context) ([]string, error) { - if len(fs.fields) > 1 { - return nil, errors.New("ent: FindingSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := fs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*FindingQuery, *FindingSelect](ctx, fs.FindingQuery, fs, fs.inters, v) } -// StringsX is like Strings, but panics if an error occurs. -func (fs *FindingSelect) StringsX(ctx context.Context) []string { - v, err := fs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (fs *FindingSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = fs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (fs *FindingSelect) StringX(ctx context.Context) string { - v, err := fs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
-func (fs *FindingSelect) Ints(ctx context.Context) ([]int, error) { - if len(fs.fields) > 1 { - return nil, errors.New("ent: FindingSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := fs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (fs *FindingSelect) IntsX(ctx context.Context) []int { - v, err := fs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (fs *FindingSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = fs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (fs *FindingSelect) IntX(ctx context.Context) int { - v, err := fs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (fs *FindingSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(fs.fields) > 1 { - return nil, errors.New("ent: FindingSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := fs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (fs *FindingSelect) Float64sX(ctx context.Context) []float64 { - v, err := fs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (fs *FindingSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = fs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (fs *FindingSelect) Float64X(ctx context.Context) float64 { - v, err := fs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (fs *FindingSelect) Bools(ctx context.Context) ([]bool, error) { - if len(fs.fields) > 1 { - return nil, errors.New("ent: FindingSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := fs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (fs *FindingSelect) BoolsX(ctx context.Context) []bool { - v, err := fs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (fs *FindingSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = fs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{finding.Label} - default: - err = fmt.Errorf("ent: FindingSelect.Bools returned %d results when one was expected", len(v)) +func (fs *FindingSelect) sqlScan(ctx context.Context, root *FindingQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(fs.fns)) + for _, fn := range fs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (fs *FindingSelect) BoolX(ctx context.Context) bool { - v, err := fs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*fs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (fs *FindingSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := fs.sql.Query() + query, args := selector.Query() if err := fs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/finding_update.go b/ent/finding_update.go index 59ffa516..232c8e1b 100755 --- a/ent/finding_update.go +++ b/ent/finding_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -38,24 +38,56 @@ func (fu *FindingUpdate) SetName(s string) *FindingUpdate { return fu } +// SetNillableName sets the "name" field if the given value is not nil. +func (fu *FindingUpdate) SetNillableName(s *string) *FindingUpdate { + if s != nil { + fu.SetName(*s) + } + return fu +} + // SetDescription sets the "description" field. func (fu *FindingUpdate) SetDescription(s string) *FindingUpdate { fu.mutation.SetDescription(s) return fu } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (fu *FindingUpdate) SetNillableDescription(s *string) *FindingUpdate { + if s != nil { + fu.SetDescription(*s) + } + return fu +} + // SetSeverity sets the "severity" field. func (fu *FindingUpdate) SetSeverity(f finding.Severity) *FindingUpdate { fu.mutation.SetSeverity(f) return fu } +// SetNillableSeverity sets the "severity" field if the given value is not nil. +func (fu *FindingUpdate) SetNillableSeverity(f *finding.Severity) *FindingUpdate { + if f != nil { + fu.SetSeverity(*f) + } + return fu +} + // SetDifficulty sets the "difficulty" field. func (fu *FindingUpdate) SetDifficulty(f finding.Difficulty) *FindingUpdate { fu.mutation.SetDifficulty(f) return fu } +// SetNillableDifficulty sets the "difficulty" field if the given value is not nil. +func (fu *FindingUpdate) SetNillableDifficulty(f *finding.Difficulty) *FindingUpdate { + if f != nil { + fu.SetDifficulty(*f) + } + return fu +} + // SetTags sets the "tags" field. func (fu *FindingUpdate) SetTags(m map[string]string) *FindingUpdate { fu.mutation.SetTags(m) @@ -180,40 +212,7 @@ func (fu *FindingUpdate) ClearFindingToEnvironment() *FindingUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
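Usage sketch (not part of the diff): the FindingQuery rewrite above moves limit/offset/field state into a QueryContext, adds interceptor support, and splits eager-loading into per-edge load helpers, including the new WithNamedFindingToUser variant. The snippet assumes client.Finding.Query() and finding.HasFindingToUser() by analogy with the generated helpers shown here, and leaves the nested user query unfiltered to avoid inventing user predicates.

package main

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/finding"
)

// findingsWithUsers returns findings that have at least one user attached,
// eager-loading the FindingToUser edge once under its default name and once
// under the name "assignees" via the new WithNamedFindingToUser helper.
// NOTE: client.Finding.Query() and finding.HasFindingToUser() are assumed by
// analogy with the generated helpers in this diff; the named edge would be
// read back through the node's generated named-edge accessor (also assumed).
func findingsWithUsers(ctx context.Context, client *ent.Client) ([]*ent.Finding, error) {
	return client.Finding.Query().
		Where(finding.HasFindingToUser()).
		WithFindingToUser().
		WithNamedFindingToUser("assignees").
		All(ctx)
}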
func (fu *FindingUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(fu.hooks) == 0 { - if err = fu.check(); err != nil { - return 0, err - } - affected, err = fu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FindingMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = fu.check(); err != nil { - return 0, err - } - fu.mutation = mutation - affected, err = fu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(fu.hooks) - 1; i >= 0; i-- { - if fu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, fu.sqlSave, fu.mutation, fu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -254,16 +253,10 @@ func (fu *FindingUpdate) check() error { } func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: finding.Table, - Columns: finding.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, - }, + if err := fu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(finding.Table, finding.Columns, sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID)) if ps := fu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -272,39 +265,19 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := fu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: finding.FieldName, - }) + _spec.SetField(finding.FieldName, field.TypeString, value) } if value, ok := fu.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: finding.FieldDescription, - }) + _spec.SetField(finding.FieldDescription, field.TypeString, value) } if value, ok := fu.mutation.Severity(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: finding.FieldSeverity, - }) + _spec.SetField(finding.FieldSeverity, field.TypeEnum, value) } if value, ok := fu.mutation.Difficulty(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: finding.FieldDifficulty, - }) + _spec.SetField(finding.FieldDifficulty, field.TypeEnum, value) } if value, ok := fu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: finding.FieldTags, - }) + _spec.SetField(finding.FieldTags, field.TypeJSON, value) } if fu.mutation.FindingToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -314,10 +287,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -330,10 +300,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, 
err error) { Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -349,10 +316,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -368,10 +332,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -384,10 +345,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -403,10 +361,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -419,10 +374,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -438,10 +390,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -454,10 +403,7 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{finding.FindingToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -469,10 +415,11 @@ func (fu *FindingUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{finding.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + fu.mutation.done = true return n, nil } @@ -490,24 +437,56 @@ func (fuo *FindingUpdateOne) SetName(s string) *FindingUpdateOne { return fuo } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (fuo *FindingUpdateOne) SetNillableName(s *string) *FindingUpdateOne { + if s != nil { + fuo.SetName(*s) + } + return fuo +} + // SetDescription sets the "description" field. func (fuo *FindingUpdateOne) SetDescription(s string) *FindingUpdateOne { fuo.mutation.SetDescription(s) return fuo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (fuo *FindingUpdateOne) SetNillableDescription(s *string) *FindingUpdateOne { + if s != nil { + fuo.SetDescription(*s) + } + return fuo +} + // SetSeverity sets the "severity" field. func (fuo *FindingUpdateOne) SetSeverity(f finding.Severity) *FindingUpdateOne { fuo.mutation.SetSeverity(f) return fuo } +// SetNillableSeverity sets the "severity" field if the given value is not nil. +func (fuo *FindingUpdateOne) SetNillableSeverity(f *finding.Severity) *FindingUpdateOne { + if f != nil { + fuo.SetSeverity(*f) + } + return fuo +} + // SetDifficulty sets the "difficulty" field. func (fuo *FindingUpdateOne) SetDifficulty(f finding.Difficulty) *FindingUpdateOne { fuo.mutation.SetDifficulty(f) return fuo } +// SetNillableDifficulty sets the "difficulty" field if the given value is not nil. +func (fuo *FindingUpdateOne) SetNillableDifficulty(f *finding.Difficulty) *FindingUpdateOne { + if f != nil { + fuo.SetDifficulty(*f) + } + return fuo +} + // SetTags sets the "tags" field. func (fuo *FindingUpdateOne) SetTags(m map[string]string) *FindingUpdateOne { fuo.mutation.SetTags(m) @@ -630,6 +609,12 @@ func (fuo *FindingUpdateOne) ClearFindingToEnvironment() *FindingUpdateOne { return fuo } +// Where appends a list predicates to the FindingUpdate builder. +func (fuo *FindingUpdateOne) Where(ps ...predicate.Finding) *FindingUpdateOne { + fuo.mutation.Where(ps...) + return fuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (fuo *FindingUpdateOne) Select(field string, fields ...string) *FindingUpdateOne { @@ -639,40 +624,7 @@ func (fuo *FindingUpdateOne) Select(field string, fields ...string) *FindingUpda // Save executes the query and returns the updated Finding entity. func (fuo *FindingUpdateOne) Save(ctx context.Context) (*Finding, error) { - var ( - err error - node *Finding - ) - if len(fuo.hooks) == 0 { - if err = fuo.check(); err != nil { - return nil, err - } - node, err = fuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*FindingMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = fuo.check(); err != nil { - return nil, err - } - fuo.mutation = mutation - node, err = fuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(fuo.hooks) - 1; i >= 0; i-- { - if fuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = fuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, fuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, fuo.sqlSave, fuo.mutation, fuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
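Editor's note (not part of the patch): the hunks above replace the hand-rolled hook chains in Save with the consolidated withHooks helper and add SetNillable* setters plus a Where method to FindingUpdateOne. A minimal sketch of how the regenerated builder might be driven, assuming a connected *ent.Client named "client"; the guard predicate and helper name are illustrative, not taken from this diff:

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/finding"
	"github.com/google/uuid"
)

// patchFinding updates a single Finding. SetNillableSeverity is a no-op when sev is
// nil, and the new Where() lets callers attach extra predicates to the one-row update;
// registered hooks now run through the shared withHooks path inside Save.
func patchFinding(ctx context.Context, client *ent.Client, id uuid.UUID, sev *finding.Severity) (*ent.Finding, error) {
	return client.Finding.UpdateOneID(id).
		Where(finding.NameNEQ("")). // illustrative guard predicate
		SetNillableSeverity(sev).
		Save(ctx)
}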
@@ -713,16 +665,10 @@ func (fuo *FindingUpdateOne) check() error { } func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: finding.Table, - Columns: finding.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, - }, + if err := fuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(finding.Table, finding.Columns, sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID)) id, ok := fuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Finding.id" for update`)} @@ -748,39 +694,19 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e } } if value, ok := fuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: finding.FieldName, - }) + _spec.SetField(finding.FieldName, field.TypeString, value) } if value, ok := fuo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: finding.FieldDescription, - }) + _spec.SetField(finding.FieldDescription, field.TypeString, value) } if value, ok := fuo.mutation.Severity(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: finding.FieldSeverity, - }) + _spec.SetField(finding.FieldSeverity, field.TypeEnum, value) } if value, ok := fuo.mutation.Difficulty(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: finding.FieldDifficulty, - }) + _spec.SetField(finding.FieldDifficulty, field.TypeEnum, value) } if value, ok := fuo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: finding.FieldTags, - }) + _spec.SetField(finding.FieldTags, field.TypeJSON, value) } if fuo.mutation.FindingToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -790,10 +716,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -806,10 +729,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -825,10 +745,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -844,10 +761,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + 
IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -860,10 +774,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -879,10 +790,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -895,10 +803,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -914,10 +819,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -930,10 +832,7 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e Columns: []string{finding.FindingToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -948,9 +847,10 @@ func (fuo *FindingUpdateOne) sqlSave(ctx context.Context) (_node *Finding, err e if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{finding.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + fuo.mutation.done = true return _node, nil } diff --git a/ent/ginfilemiddleware.go b/ent/ginfilemiddleware.go index 8b482b59..6c9f8d49 100755 --- a/ent/ginfilemiddleware.go +++ b/ent/ginfilemiddleware.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/ginfilemiddleware" "github.com/gen0cide/laforge/ent/provisionedhost" @@ -28,13 +29,15 @@ type GinFileMiddleware struct { // The values are being populated by the GinFileMiddlewareQuery when eager-loading is set. Edges GinFileMiddlewareEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // GinFileMiddlewareToProvisionedHost holds the value of the GinFileMiddlewareToProvisionedHost edge. HCLGinFileMiddlewareToProvisionedHost *ProvisionedHost `json:"GinFileMiddlewareToProvisionedHost,omitempty"` // GinFileMiddlewareToProvisioningStep holds the value of the GinFileMiddlewareToProvisioningStep edge. 
HCLGinFileMiddlewareToProvisioningStep *ProvisioningStep `json:"GinFileMiddlewareToProvisioningStep,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ server_task_server_task_to_gin_file_middleware *uuid.UUID + selectValues sql.SelectValues } // GinFileMiddlewareEdges holds the relations/edges for other nodes in the graph. @@ -46,6 +49,8 @@ type GinFileMiddlewareEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int } // GinFileMiddlewareToProvisionedHostOrErr returns the GinFileMiddlewareToProvisionedHost value or an error if the edge @@ -53,8 +58,7 @@ type GinFileMiddlewareEdges struct { func (e GinFileMiddlewareEdges) GinFileMiddlewareToProvisionedHostOrErr() (*ProvisionedHost, error) { if e.loadedTypes[0] { if e.GinFileMiddlewareToProvisionedHost == nil { - // The edge GinFileMiddlewareToProvisionedHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionedhost.Label} } return e.GinFileMiddlewareToProvisionedHost, nil @@ -67,8 +71,7 @@ func (e GinFileMiddlewareEdges) GinFileMiddlewareToProvisionedHostOrErr() (*Prov func (e GinFileMiddlewareEdges) GinFileMiddlewareToProvisioningStepOrErr() (*ProvisioningStep, error) { if e.loadedTypes[1] { if e.GinFileMiddlewareToProvisioningStep == nil { - // The edge GinFileMiddlewareToProvisioningStep was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisioningstep.Label} } return e.GinFileMiddlewareToProvisioningStep, nil @@ -77,8 +80,8 @@ func (e GinFileMiddlewareEdges) GinFileMiddlewareToProvisioningStepOrErr() (*Pro } // scanValues returns the types for scanning values from sql.Rows. -func (*GinFileMiddleware) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*GinFileMiddleware) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case ginfilemiddleware.FieldAccessed: @@ -90,7 +93,7 @@ func (*GinFileMiddleware) scanValues(columns []string) ([]interface{}, error) { case ginfilemiddleware.ForeignKeys[0]: // server_task_server_task_to_gin_file_middleware values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type GinFileMiddleware", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -98,7 +101,7 @@ func (*GinFileMiddleware) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the GinFileMiddleware fields. -func (gfm *GinFileMiddleware) assignValues(columns []string, values []interface{}) error { +func (gfm *GinFileMiddleware) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -135,36 +138,44 @@ func (gfm *GinFileMiddleware) assignValues(columns []string, values []interface{ gfm.server_task_server_task_to_gin_file_middleware = new(uuid.UUID) *gfm.server_task_server_task_to_gin_file_middleware = *value.S.(*uuid.UUID) } + default: + gfm.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the GinFileMiddleware. 
+// This includes values selected through modifiers, order, etc. +func (gfm *GinFileMiddleware) Value(name string) (ent.Value, error) { + return gfm.selectValues.Get(name) +} + // QueryGinFileMiddlewareToProvisionedHost queries the "GinFileMiddlewareToProvisionedHost" edge of the GinFileMiddleware entity. func (gfm *GinFileMiddleware) QueryGinFileMiddlewareToProvisionedHost() *ProvisionedHostQuery { - return (&GinFileMiddlewareClient{config: gfm.config}).QueryGinFileMiddlewareToProvisionedHost(gfm) + return NewGinFileMiddlewareClient(gfm.config).QueryGinFileMiddlewareToProvisionedHost(gfm) } // QueryGinFileMiddlewareToProvisioningStep queries the "GinFileMiddlewareToProvisioningStep" edge of the GinFileMiddleware entity. func (gfm *GinFileMiddleware) QueryGinFileMiddlewareToProvisioningStep() *ProvisioningStepQuery { - return (&GinFileMiddlewareClient{config: gfm.config}).QueryGinFileMiddlewareToProvisioningStep(gfm) + return NewGinFileMiddlewareClient(gfm.config).QueryGinFileMiddlewareToProvisioningStep(gfm) } // Update returns a builder for updating this GinFileMiddleware. // Note that you need to call GinFileMiddleware.Unwrap() before calling this method if this GinFileMiddleware // was returned from a transaction, and the transaction was committed or rolled back. func (gfm *GinFileMiddleware) Update() *GinFileMiddlewareUpdateOne { - return (&GinFileMiddlewareClient{config: gfm.config}).UpdateOne(gfm) + return NewGinFileMiddlewareClient(gfm.config).UpdateOne(gfm) } // Unwrap unwraps the GinFileMiddleware entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (gfm *GinFileMiddleware) Unwrap() *GinFileMiddleware { - tx, ok := gfm.config.driver.(*txDriver) + _tx, ok := gfm.config.driver.(*txDriver) if !ok { panic("ent: GinFileMiddleware is not a transactional entity") } - gfm.config.driver = tx.drv + gfm.config.driver = _tx.drv return gfm } @@ -172,12 +183,14 @@ func (gfm *GinFileMiddleware) Unwrap() *GinFileMiddleware { func (gfm *GinFileMiddleware) String() string { var builder strings.Builder builder.WriteString("GinFileMiddleware(") - builder.WriteString(fmt.Sprintf("id=%v", gfm.ID)) - builder.WriteString(", url_id=") + builder.WriteString(fmt.Sprintf("id=%v, ", gfm.ID)) + builder.WriteString("url_id=") builder.WriteString(gfm.URLID) - builder.WriteString(", file_path=") + builder.WriteString(", ") + builder.WriteString("file_path=") builder.WriteString(gfm.FilePath) - builder.WriteString(", accessed=") + builder.WriteString(", ") + builder.WriteString("accessed=") builder.WriteString(fmt.Sprintf("%v", gfm.Accessed)) builder.WriteByte(')') return builder.String() @@ -185,9 +198,3 @@ func (gfm *GinFileMiddleware) String() string { // GinFileMiddlewares is a parsable slice of GinFileMiddleware. type GinFileMiddlewares []*GinFileMiddleware - -func (gfm GinFileMiddlewares) config(cfg config) { - for _i := range gfm { - gfm[_i].config = cfg - } -} diff --git a/ent/ginfilemiddleware/ginfilemiddleware.go b/ent/ginfilemiddleware/ginfilemiddleware.go index b107441e..9d792d2e 100755 --- a/ent/ginfilemiddleware/ginfilemiddleware.go +++ b/ent/ginfilemiddleware/ginfilemiddleware.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ginfilemiddleware import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -74,3 +76,54 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the GinFileMiddleware queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByURLID orders the results by the url_id field. +func ByURLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURLID, opts...).ToFunc() +} + +// ByFilePath orders the results by the file_path field. +func ByFilePath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFilePath, opts...).ToFunc() +} + +// ByAccessed orders the results by the accessed field. +func ByAccessed(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessed, opts...).ToFunc() +} + +// ByGinFileMiddlewareToProvisionedHostField orders the results by GinFileMiddlewareToProvisionedHost field. +func ByGinFileMiddlewareToProvisionedHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGinFileMiddlewareToProvisionedHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGinFileMiddlewareToProvisioningStepField orders the results by GinFileMiddlewareToProvisioningStep field. +func ByGinFileMiddlewareToProvisioningStepField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGinFileMiddlewareToProvisioningStepStep(), sql.OrderByField(field, opts...)) + } +} +func newGinFileMiddlewareToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GinFileMiddlewareToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, GinFileMiddlewareToProvisionedHostTable, GinFileMiddlewareToProvisionedHostColumn), + ) +} +func newGinFileMiddlewareToProvisioningStepStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GinFileMiddlewareToProvisioningStepInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, GinFileMiddlewareToProvisioningStepTable, GinFileMiddlewareToProvisioningStepColumn), + ) +} diff --git a/ent/ginfilemiddleware/where.go b/ent/ginfilemiddleware/where.go index b8c6d0b1..e76558fe 100755 --- a/ent/ginfilemiddleware/where.go +++ b/ent/ginfilemiddleware/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ginfilemiddleware @@ -11,342 +11,202 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
func IDNEQ(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.GinFileMiddleware(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.GinFileMiddleware(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.GinFileMiddleware(sql.FieldLTE(FieldID, id)) } // URLID applies equality check predicate on the "url_id" field. It's identical to URLIDEQ. func URLID(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldURLID, v)) } // FilePath applies equality check predicate on the "file_path" field. It's identical to FilePathEQ. func FilePath(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldFilePath, v)) } // Accessed applies equality check predicate on the "accessed" field. It's identical to AccessedEQ. func Accessed(v bool) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAccessed), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldAccessed, v)) } // URLIDEQ applies the EQ predicate on the "url_id" field. 
func URLIDEQ(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldURLID, v)) } // URLIDNEQ applies the NEQ predicate on the "url_id" field. func URLIDNEQ(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldNEQ(FieldURLID, v)) } // URLIDIn applies the In predicate on the "url_id" field. func URLIDIn(vs ...string) predicate.GinFileMiddleware { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.GinFileMiddleware(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldURLID), v...)) - }) + return predicate.GinFileMiddleware(sql.FieldIn(FieldURLID, vs...)) } // URLIDNotIn applies the NotIn predicate on the "url_id" field. func URLIDNotIn(vs ...string) predicate.GinFileMiddleware { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.GinFileMiddleware(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldURLID), v...)) - }) + return predicate.GinFileMiddleware(sql.FieldNotIn(FieldURLID, vs...)) } // URLIDGT applies the GT predicate on the "url_id" field. func URLIDGT(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldGT(FieldURLID, v)) } // URLIDGTE applies the GTE predicate on the "url_id" field. func URLIDGTE(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldGTE(FieldURLID, v)) } // URLIDLT applies the LT predicate on the "url_id" field. func URLIDLT(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldLT(FieldURLID, v)) } // URLIDLTE applies the LTE predicate on the "url_id" field. func URLIDLTE(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldLTE(FieldURLID, v)) } // URLIDContains applies the Contains predicate on the "url_id" field. func URLIDContains(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldContains(FieldURLID, v)) } // URLIDHasPrefix applies the HasPrefix predicate on the "url_id" field. func URLIDHasPrefix(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldHasPrefix(FieldURLID, v)) } // URLIDHasSuffix applies the HasSuffix predicate on the "url_id" field. 
func URLIDHasSuffix(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldHasSuffix(FieldURLID, v)) } // URLIDEqualFold applies the EqualFold predicate on the "url_id" field. func URLIDEqualFold(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEqualFold(FieldURLID, v)) } // URLIDContainsFold applies the ContainsFold predicate on the "url_id" field. func URLIDContainsFold(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldURLID), v)) - }) + return predicate.GinFileMiddleware(sql.FieldContainsFold(FieldURLID, v)) } // FilePathEQ applies the EQ predicate on the "file_path" field. func FilePathEQ(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldFilePath, v)) } // FilePathNEQ applies the NEQ predicate on the "file_path" field. func FilePathNEQ(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldNEQ(FieldFilePath, v)) } // FilePathIn applies the In predicate on the "file_path" field. func FilePathIn(vs ...string) predicate.GinFileMiddleware { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.GinFileMiddleware(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldFilePath), v...)) - }) + return predicate.GinFileMiddleware(sql.FieldIn(FieldFilePath, vs...)) } // FilePathNotIn applies the NotIn predicate on the "file_path" field. func FilePathNotIn(vs ...string) predicate.GinFileMiddleware { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.GinFileMiddleware(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldFilePath), v...)) - }) + return predicate.GinFileMiddleware(sql.FieldNotIn(FieldFilePath, vs...)) } // FilePathGT applies the GT predicate on the "file_path" field. func FilePathGT(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldGT(FieldFilePath, v)) } // FilePathGTE applies the GTE predicate on the "file_path" field. func FilePathGTE(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldGTE(FieldFilePath, v)) } // FilePathLT applies the LT predicate on the "file_path" field. 
func FilePathLT(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldLT(FieldFilePath, v)) } // FilePathLTE applies the LTE predicate on the "file_path" field. func FilePathLTE(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldLTE(FieldFilePath, v)) } // FilePathContains applies the Contains predicate on the "file_path" field. func FilePathContains(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldContains(FieldFilePath, v)) } // FilePathHasPrefix applies the HasPrefix predicate on the "file_path" field. func FilePathHasPrefix(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldHasPrefix(FieldFilePath, v)) } // FilePathHasSuffix applies the HasSuffix predicate on the "file_path" field. func FilePathHasSuffix(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldHasSuffix(FieldFilePath, v)) } // FilePathEqualFold applies the EqualFold predicate on the "file_path" field. func FilePathEqualFold(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEqualFold(FieldFilePath, v)) } // FilePathContainsFold applies the ContainsFold predicate on the "file_path" field. func FilePathContainsFold(v string) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldFilePath), v)) - }) + return predicate.GinFileMiddleware(sql.FieldContainsFold(FieldFilePath, v)) } // AccessedEQ applies the EQ predicate on the "accessed" field. func AccessedEQ(v bool) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAccessed), v)) - }) + return predicate.GinFileMiddleware(sql.FieldEQ(FieldAccessed, v)) } // AccessedNEQ applies the NEQ predicate on the "accessed" field. func AccessedNEQ(v bool) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAccessed), v)) - }) + return predicate.GinFileMiddleware(sql.FieldNEQ(FieldAccessed, v)) } // HasGinFileMiddlewareToProvisionedHost applies the HasEdge predicate on the "GinFileMiddlewareToProvisionedHost" edge. 
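Editor's note (not part of the patch): the where.go rewrite above swaps each hand-written selector closure for the equivalent sql.Field* helper, and ginfilemiddleware.go now exposes By* OrderOption constructors. Call sites compose exactly as before; a small sketch under the assumption of a connected *ent.Client named "client" (the path prefix is a placeholder):

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/ginfilemiddleware"
)

// unservedFiles lists file-middleware rows that have not been fetched yet, ordered by
// their on-disk path. AccessedEQ and FilePathHasPrefix now delegate to sql.FieldEQ and
// sql.FieldHasPrefix; ByFilePath is one of the new OrderOption helpers.
func unservedFiles(ctx context.Context, client *ent.Client) ([]*ent.GinFileMiddleware, error) {
	return client.GinFileMiddleware.Query().
		Where(
			ginfilemiddleware.AccessedEQ(false),
			ginfilemiddleware.FilePathHasPrefix("/builds/"), // placeholder prefix
		).
		Order(ginfilemiddleware.ByFilePath()).
		All(ctx)
}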
@@ -354,7 +214,6 @@ func HasGinFileMiddlewareToProvisionedHost() predicate.GinFileMiddleware { return predicate.GinFileMiddleware(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(GinFileMiddlewareToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, GinFileMiddlewareToProvisionedHostTable, GinFileMiddlewareToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -364,11 +223,7 @@ func HasGinFileMiddlewareToProvisionedHost() predicate.GinFileMiddleware { // HasGinFileMiddlewareToProvisionedHostWith applies the HasEdge predicate on the "GinFileMiddlewareToProvisionedHost" edge with a given conditions (other predicates). func HasGinFileMiddlewareToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.GinFileMiddleware { return predicate.GinFileMiddleware(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GinFileMiddlewareToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, GinFileMiddlewareToProvisionedHostTable, GinFileMiddlewareToProvisionedHostColumn), - ) + step := newGinFileMiddlewareToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -382,7 +237,6 @@ func HasGinFileMiddlewareToProvisioningStep() predicate.GinFileMiddleware { return predicate.GinFileMiddleware(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(GinFileMiddlewareToProvisioningStepTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, GinFileMiddlewareToProvisioningStepTable, GinFileMiddlewareToProvisioningStepColumn), ) sqlgraph.HasNeighbors(s, step) @@ -392,11 +246,7 @@ func HasGinFileMiddlewareToProvisioningStep() predicate.GinFileMiddleware { // HasGinFileMiddlewareToProvisioningStepWith applies the HasEdge predicate on the "GinFileMiddlewareToProvisioningStep" edge with a given conditions (other predicates). func HasGinFileMiddlewareToProvisioningStepWith(preds ...predicate.ProvisioningStep) predicate.GinFileMiddleware { return predicate.GinFileMiddleware(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GinFileMiddlewareToProvisioningStepInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, GinFileMiddlewareToProvisioningStepTable, GinFileMiddlewareToProvisioningStepColumn), - ) + step := newGinFileMiddlewareToProvisioningStepStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -407,32 +257,15 @@ func HasGinFileMiddlewareToProvisioningStepWith(preds ...predicate.ProvisioningS // And groups predicates with the AND operator between them. func And(predicates ...predicate.GinFileMiddleware) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.GinFileMiddleware(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.GinFileMiddleware) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.GinFileMiddleware(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.GinFileMiddleware) predicate.GinFileMiddleware { - return predicate.GinFileMiddleware(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.GinFileMiddleware(sql.NotPredicates(p)) } diff --git a/ent/ginfilemiddleware_create.go b/ent/ginfilemiddleware_create.go index 113e1798..77d6d17d 100755 --- a/ent/ginfilemiddleware_create.go +++ b/ent/ginfilemiddleware_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -107,44 +107,8 @@ func (gfmc *GinFileMiddlewareCreate) Mutation() *GinFileMiddlewareMutation { // Save creates the GinFileMiddleware in the database. func (gfmc *GinFileMiddlewareCreate) Save(ctx context.Context) (*GinFileMiddleware, error) { - var ( - err error - node *GinFileMiddleware - ) gfmc.defaults() - if len(gfmc.hooks) == 0 { - if err = gfmc.check(); err != nil { - return nil, err - } - node, err = gfmc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*GinFileMiddlewareMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = gfmc.check(); err != nil { - return nil, err - } - gfmc.mutation = mutation - if node, err = gfmc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(gfmc.hooks) - 1; i >= 0; i-- { - if gfmc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = gfmc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, gfmc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, gfmc.sqlSave, gfmc.mutation, gfmc.hooks) } // SaveX calls Save and panics if Save returns an error. 
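Editor's note (not part of the patch): ginfilemiddleware_create.go follows the same pattern, with Save delegating to withHooks and the field checks moving into sqlSave. A sketch of the create path, assuming a connected *ent.Client named "client"; the setter names are inferred from the url_id, file_path and accessed columns shown in this diff, and the values are placeholders:

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// trackDownload registers a one-shot download URL. Validation (check) now runs inside
// sqlSave, which withHooks invokes after the registered hooks have executed.
func trackDownload(ctx context.Context, client *ent.Client) (*ent.GinFileMiddleware, error) {
	return client.GinFileMiddleware.Create().
		SetURLID("dl-0001").              // placeholder url_id
		SetFilePath("/builds/agent.bin"). // placeholder file_path
		SetAccessed(false).
		Save(ctx)
}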
@@ -196,10 +160,13 @@ func (gfmc *GinFileMiddlewareCreate) check() error { } func (gfmc *GinFileMiddlewareCreate) sqlSave(ctx context.Context) (*GinFileMiddleware, error) { + if err := gfmc.check(); err != nil { + return nil, err + } _node, _spec := gfmc.createSpec() if err := sqlgraph.CreateNode(ctx, gfmc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -210,46 +177,30 @@ func (gfmc *GinFileMiddlewareCreate) sqlSave(ctx context.Context) (*GinFileMiddl return nil, err } } + gfmc.mutation.id = &_node.ID + gfmc.mutation.done = true return _node, nil } func (gfmc *GinFileMiddlewareCreate) createSpec() (*GinFileMiddleware, *sqlgraph.CreateSpec) { var ( _node = &GinFileMiddleware{config: gfmc.config} - _spec = &sqlgraph.CreateSpec{ - Table: ginfilemiddleware.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(ginfilemiddleware.Table, sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID)) ) if id, ok := gfmc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := gfmc.mutation.URLID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ginfilemiddleware.FieldURLID, - }) + _spec.SetField(ginfilemiddleware.FieldURLID, field.TypeString, value) _node.URLID = value } if value, ok := gfmc.mutation.FilePath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ginfilemiddleware.FieldFilePath, - }) + _spec.SetField(ginfilemiddleware.FieldFilePath, field.TypeString, value) _node.FilePath = value } if value, ok := gfmc.mutation.Accessed(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: ginfilemiddleware.FieldAccessed, - }) + _spec.SetField(ginfilemiddleware.FieldAccessed, field.TypeBool, value) _node.Accessed = value } if nodes := gfmc.mutation.GinFileMiddlewareToProvisionedHostIDs(); len(nodes) > 0 { @@ -260,10 +211,7 @@ func (gfmc *GinFileMiddlewareCreate) createSpec() (*GinFileMiddleware, *sqlgraph Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -279,10 +227,7 @@ func (gfmc *GinFileMiddlewareCreate) createSpec() (*GinFileMiddleware, *sqlgraph Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -296,11 +241,15 @@ func (gfmc *GinFileMiddlewareCreate) createSpec() (*GinFileMiddleware, *sqlgraph // GinFileMiddlewareCreateBulk is the builder for creating many GinFileMiddleware entities in bulk. type GinFileMiddlewareCreateBulk struct { config + err error builders []*GinFileMiddlewareCreate } // Save creates the GinFileMiddleware entities in the database. 
func (gfmcb *GinFileMiddlewareCreateBulk) Save(ctx context.Context) ([]*GinFileMiddleware, error) { + if gfmcb.err != nil { + return nil, gfmcb.err + } specs := make([]*sqlgraph.CreateSpec, len(gfmcb.builders)) nodes := make([]*GinFileMiddleware, len(gfmcb.builders)) mutators := make([]Mutator, len(gfmcb.builders)) @@ -317,8 +266,8 @@ func (gfmcb *GinFileMiddlewareCreateBulk) Save(ctx context.Context) ([]*GinFileM return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, gfmcb.builders[i+1].mutation) } else { @@ -326,7 +275,7 @@ func (gfmcb *GinFileMiddlewareCreateBulk) Save(ctx context.Context) ([]*GinFileM // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, gfmcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/ginfilemiddleware_delete.go b/ent/ginfilemiddleware_delete.go index c2cc6e7d..ab7558de 100755 --- a/ent/ginfilemiddleware_delete.go +++ b/ent/ginfilemiddleware_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (gfmd *GinFileMiddlewareDelete) Where(ps ...predicate.GinFileMiddleware) *G // Exec executes the deletion query and returns how many vertices were deleted. func (gfmd *GinFileMiddlewareDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(gfmd.hooks) == 0 { - affected, err = gfmd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*GinFileMiddlewareMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - gfmd.mutation = mutation - affected, err = gfmd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(gfmd.hooks) - 1; i >= 0; i-- { - if gfmd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = gfmd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, gfmd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, gfmd.sqlExec, gfmd.mutation, gfmd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
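Editor's note (not part of the patch): the delete builders above gain a Where method on GinFileMiddlewareDeleteOne and now wrap constraint violations in *ent.ConstraintError. A sketch of the bulk-delete path under the same *ent.Client assumption; the predicate and error message are illustrative:

package examples

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/ginfilemiddleware"
)

// pruneAccessed removes middleware entries whose files were already served. The
// generated ent.IsConstraintError helper recognises the wrapped *ConstraintError that
// sqlExec now returns when another row still references the one being deleted.
func pruneAccessed(ctx context.Context, client *ent.Client) (int, error) {
	n, err := client.GinFileMiddleware.Delete().
		Where(ginfilemiddleware.Accessed(true)).
		Exec(ctx)
	if ent.IsConstraintError(err) {
		return 0, fmt.Errorf("gin file middleware rows still referenced: %w", err)
	}
	return n, err
}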
@@ -68,15 +40,7 @@ func (gfmd *GinFileMiddlewareDelete) ExecX(ctx context.Context) int { } func (gfmd *GinFileMiddlewareDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: ginfilemiddleware.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(ginfilemiddleware.Table, sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID)) if ps := gfmd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (gfmd *GinFileMiddlewareDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, gfmd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, gfmd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + gfmd.mutation.done = true + return affected, err } // GinFileMiddlewareDeleteOne is the builder for deleting a single GinFileMiddleware entity. @@ -92,6 +61,12 @@ type GinFileMiddlewareDeleteOne struct { gfmd *GinFileMiddlewareDelete } +// Where appends a list predicates to the GinFileMiddlewareDelete builder. +func (gfmdo *GinFileMiddlewareDeleteOne) Where(ps ...predicate.GinFileMiddleware) *GinFileMiddlewareDeleteOne { + gfmdo.gfmd.mutation.Where(ps...) + return gfmdo +} + // Exec executes the deletion query. func (gfmdo *GinFileMiddlewareDeleteOne) Exec(ctx context.Context) error { n, err := gfmdo.gfmd.Exec(ctx) @@ -107,5 +82,7 @@ func (gfmdo *GinFileMiddlewareDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (gfmdo *GinFileMiddlewareDeleteOne) ExecX(ctx context.Context) { - gfmdo.gfmd.ExecX(ctx) + if err := gfmdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/ginfilemiddleware_query.go b/ent/ginfilemiddleware_query.go index cd5df699..bb5dd5b6 100755 --- a/ent/ginfilemiddleware_query.go +++ b/ent/ginfilemiddleware_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,16 +21,15 @@ import ( // GinFileMiddlewareQuery is the builder for querying GinFileMiddleware entities. type GinFileMiddlewareQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.GinFileMiddleware - // eager-loading edges. + ctx *QueryContext + order []ginfilemiddleware.OrderOption + inters []Interceptor + predicates []predicate.GinFileMiddleware withGinFileMiddlewareToProvisionedHost *ProvisionedHostQuery withGinFileMiddlewareToProvisioningStep *ProvisioningStepQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*GinFileMiddleware) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -43,34 +41,34 @@ func (gfmq *GinFileMiddlewareQuery) Where(ps ...predicate.GinFileMiddleware) *Gi return gfmq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (gfmq *GinFileMiddlewareQuery) Limit(limit int) *GinFileMiddlewareQuery { - gfmq.limit = &limit + gfmq.ctx.Limit = &limit return gfmq } -// Offset adds an offset step to the query. +// Offset to start from. 
func (gfmq *GinFileMiddlewareQuery) Offset(offset int) *GinFileMiddlewareQuery { - gfmq.offset = &offset + gfmq.ctx.Offset = &offset return gfmq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (gfmq *GinFileMiddlewareQuery) Unique(unique bool) *GinFileMiddlewareQuery { - gfmq.unique = &unique + gfmq.ctx.Unique = &unique return gfmq } -// Order adds an order step to the query. -func (gfmq *GinFileMiddlewareQuery) Order(o ...OrderFunc) *GinFileMiddlewareQuery { +// Order specifies how the records should be ordered. +func (gfmq *GinFileMiddlewareQuery) Order(o ...ginfilemiddleware.OrderOption) *GinFileMiddlewareQuery { gfmq.order = append(gfmq.order, o...) return gfmq } // QueryGinFileMiddlewareToProvisionedHost chains the current query on the "GinFileMiddlewareToProvisionedHost" edge. func (gfmq *GinFileMiddlewareQuery) QueryGinFileMiddlewareToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: gfmq.config} + query := (&ProvisionedHostClient{config: gfmq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := gfmq.prepareQuery(ctx); err != nil { return nil, err @@ -92,7 +90,7 @@ func (gfmq *GinFileMiddlewareQuery) QueryGinFileMiddlewareToProvisionedHost() *P // QueryGinFileMiddlewareToProvisioningStep chains the current query on the "GinFileMiddlewareToProvisioningStep" edge. func (gfmq *GinFileMiddlewareQuery) QueryGinFileMiddlewareToProvisioningStep() *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: gfmq.config} + query := (&ProvisioningStepClient{config: gfmq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := gfmq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +113,7 @@ func (gfmq *GinFileMiddlewareQuery) QueryGinFileMiddlewareToProvisioningStep() * // First returns the first GinFileMiddleware entity from the query. // Returns a *NotFoundError when no GinFileMiddleware was found. func (gfmq *GinFileMiddlewareQuery) First(ctx context.Context) (*GinFileMiddleware, error) { - nodes, err := gfmq.Limit(1).All(ctx) + nodes, err := gfmq.Limit(1).All(setContextOp(ctx, gfmq.ctx, "First")) if err != nil { return nil, err } @@ -138,7 +136,7 @@ func (gfmq *GinFileMiddlewareQuery) FirstX(ctx context.Context) *GinFileMiddlewa // Returns a *NotFoundError when no GinFileMiddleware ID was found. func (gfmq *GinFileMiddlewareQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = gfmq.Limit(1).IDs(ctx); err != nil { + if ids, err = gfmq.Limit(1).IDs(setContextOp(ctx, gfmq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -161,7 +159,7 @@ func (gfmq *GinFileMiddlewareQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one GinFileMiddleware entity is found. // Returns a *NotFoundError when no GinFileMiddleware entities are found. func (gfmq *GinFileMiddlewareQuery) Only(ctx context.Context) (*GinFileMiddleware, error) { - nodes, err := gfmq.Limit(2).All(ctx) + nodes, err := gfmq.Limit(2).All(setContextOp(ctx, gfmq.ctx, "Only")) if err != nil { return nil, err } @@ -189,7 +187,7 @@ func (gfmq *GinFileMiddlewareQuery) OnlyX(ctx context.Context) *GinFileMiddlewar // Returns a *NotFoundError when no entities are found. 
func (gfmq *GinFileMiddlewareQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = gfmq.Limit(2).IDs(ctx); err != nil { + if ids, err = gfmq.Limit(2).IDs(setContextOp(ctx, gfmq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -214,10 +212,12 @@ func (gfmq *GinFileMiddlewareQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of GinFileMiddlewares. func (gfmq *GinFileMiddlewareQuery) All(ctx context.Context) ([]*GinFileMiddleware, error) { + ctx = setContextOp(ctx, gfmq.ctx, "All") if err := gfmq.prepareQuery(ctx); err != nil { return nil, err } - return gfmq.sqlAll(ctx) + qr := querierAll[[]*GinFileMiddleware, *GinFileMiddlewareQuery]() + return withInterceptors[[]*GinFileMiddleware](ctx, gfmq, qr, gfmq.inters) } // AllX is like All, but panics if an error occurs. @@ -230,9 +230,12 @@ func (gfmq *GinFileMiddlewareQuery) AllX(ctx context.Context) []*GinFileMiddlewa } // IDs executes the query and returns a list of GinFileMiddleware IDs. -func (gfmq *GinFileMiddlewareQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := gfmq.Select(ginfilemiddleware.FieldID).Scan(ctx, &ids); err != nil { +func (gfmq *GinFileMiddlewareQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if gfmq.ctx.Unique == nil && gfmq.path != nil { + gfmq.Unique(true) + } + ctx = setContextOp(ctx, gfmq.ctx, "IDs") + if err = gfmq.Select(ginfilemiddleware.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -249,10 +252,11 @@ func (gfmq *GinFileMiddlewareQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (gfmq *GinFileMiddlewareQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, gfmq.ctx, "Count") if err := gfmq.prepareQuery(ctx); err != nil { return 0, err } - return gfmq.sqlCount(ctx) + return withInterceptors[int](ctx, gfmq, querierCount[*GinFileMiddlewareQuery](), gfmq.inters) } // CountX is like Count, but panics if an error occurs. @@ -266,10 +270,15 @@ func (gfmq *GinFileMiddlewareQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (gfmq *GinFileMiddlewareQuery) Exist(ctx context.Context) (bool, error) { - if err := gfmq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, gfmq.ctx, "Exist") + switch _, err := gfmq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return gfmq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -289,23 +298,22 @@ func (gfmq *GinFileMiddlewareQuery) Clone() *GinFileMiddlewareQuery { } return &GinFileMiddlewareQuery{ config: gfmq.config, - limit: gfmq.limit, - offset: gfmq.offset, - order: append([]OrderFunc{}, gfmq.order...), + ctx: gfmq.ctx.Clone(), + order: append([]ginfilemiddleware.OrderOption{}, gfmq.order...), + inters: append([]Interceptor{}, gfmq.inters...), predicates: append([]predicate.GinFileMiddleware{}, gfmq.predicates...), withGinFileMiddlewareToProvisionedHost: gfmq.withGinFileMiddlewareToProvisionedHost.Clone(), withGinFileMiddlewareToProvisioningStep: gfmq.withGinFileMiddlewareToProvisioningStep.Clone(), // clone intermediate query. 
- sql: gfmq.sql.Clone(), - path: gfmq.path, - unique: gfmq.unique, + sql: gfmq.sql.Clone(), + path: gfmq.path, } } // WithGinFileMiddlewareToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "GinFileMiddlewareToProvisionedHost" edge. The optional arguments are used to configure the query builder of the edge. func (gfmq *GinFileMiddlewareQuery) WithGinFileMiddlewareToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *GinFileMiddlewareQuery { - query := &ProvisionedHostQuery{config: gfmq.config} + query := (&ProvisionedHostClient{config: gfmq.config}).Query() for _, opt := range opts { opt(query) } @@ -316,7 +324,7 @@ func (gfmq *GinFileMiddlewareQuery) WithGinFileMiddlewareToProvisionedHost(opts // WithGinFileMiddlewareToProvisioningStep tells the query-builder to eager-load the nodes that are connected to // the "GinFileMiddlewareToProvisioningStep" edge. The optional arguments are used to configure the query builder of the edge. func (gfmq *GinFileMiddlewareQuery) WithGinFileMiddlewareToProvisioningStep(opts ...func(*ProvisioningStepQuery)) *GinFileMiddlewareQuery { - query := &ProvisioningStepQuery{config: gfmq.config} + query := (&ProvisioningStepClient{config: gfmq.config}).Query() for _, opt := range opts { opt(query) } @@ -338,17 +346,13 @@ func (gfmq *GinFileMiddlewareQuery) WithGinFileMiddlewareToProvisioningStep(opts // GroupBy(ginfilemiddleware.FieldURLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (gfmq *GinFileMiddlewareQuery) GroupBy(field string, fields ...string) *GinFileMiddlewareGroupBy { - group := &GinFileMiddlewareGroupBy{config: gfmq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := gfmq.prepareQuery(ctx); err != nil { - return nil, err - } - return gfmq.sqlQuery(ctx), nil - } - return group + gfmq.ctx.Fields = append([]string{field}, fields...) + grbuild := &GinFileMiddlewareGroupBy{build: gfmq} + grbuild.flds = &gfmq.ctx.Fields + grbuild.label = ginfilemiddleware.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -363,14 +367,31 @@ func (gfmq *GinFileMiddlewareQuery) GroupBy(field string, fields ...string) *Gin // client.GinFileMiddleware.Query(). // Select(ginfilemiddleware.FieldURLID). // Scan(ctx, &v) -// func (gfmq *GinFileMiddlewareQuery) Select(fields ...string) *GinFileMiddlewareSelect { - gfmq.fields = append(gfmq.fields, fields...) - return &GinFileMiddlewareSelect{GinFileMiddlewareQuery: gfmq} + gfmq.ctx.Fields = append(gfmq.ctx.Fields, fields...) + sbuild := &GinFileMiddlewareSelect{GinFileMiddlewareQuery: gfmq} + sbuild.label = ginfilemiddleware.Label + sbuild.flds, sbuild.scan = &gfmq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a GinFileMiddlewareSelect configured with the given aggregations. +func (gfmq *GinFileMiddlewareQuery) Aggregate(fns ...AggregateFunc) *GinFileMiddlewareSelect { + return gfmq.Select().Aggregate(fns...) 
} func (gfmq *GinFileMiddlewareQuery) prepareQuery(ctx context.Context) error { - for _, f := range gfmq.fields { + for _, inter := range gfmq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, gfmq); err != nil { + return err + } + } + } + for _, f := range gfmq.ctx.Fields { if !ginfilemiddleware.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -385,7 +406,7 @@ func (gfmq *GinFileMiddlewareQuery) prepareQuery(ctx context.Context) error { return nil } -func (gfmq *GinFileMiddlewareQuery) sqlAll(ctx context.Context) ([]*GinFileMiddleware, error) { +func (gfmq *GinFileMiddlewareQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*GinFileMiddleware, error) { var ( nodes = []*GinFileMiddleware{} withFKs = gfmq.withFKs @@ -398,119 +419,125 @@ func (gfmq *GinFileMiddlewareQuery) sqlAll(ctx context.Context) ([]*GinFileMiddl if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, ginfilemiddleware.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*GinFileMiddleware).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &GinFileMiddleware{config: gfmq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(gfmq.modifiers) > 0 { + _spec.Modifiers = gfmq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, gfmq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := gfmq.withGinFileMiddlewareToProvisionedHost; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*GinFileMiddleware) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + if err := gfmq.loadGinFileMiddlewareToProvisionedHost(ctx, query, nodes, nil, + func(n *GinFileMiddleware, e *ProvisionedHost) { n.Edges.GinFileMiddlewareToProvisionedHost = e }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.InValues(ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := gfmq.withGinFileMiddlewareToProvisioningStep; query != nil { + if err := gfmq.loadGinFileMiddlewareToProvisioningStep(ctx, query, nodes, nil, + func(n *GinFileMiddleware, e *ProvisioningStep) { n.Edges.GinFileMiddlewareToProvisioningStep = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.gin_file_middleware_gin_file_middleware_to_provisioned_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "gin_file_middleware_gin_file_middleware_to_provisioned_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "gin_file_middleware_gin_file_middleware_to_provisioned_host" returned %v for node %v`, *fk, n.ID) - } - node.Edges.GinFileMiddlewareToProvisionedHost = n + } + for i := range 
gfmq.loadTotal { + if err := gfmq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := gfmq.withGinFileMiddlewareToProvisioningStep; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*GinFileMiddleware) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] +func (gfmq *GinFileMiddlewareQuery) loadGinFileMiddlewareToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*GinFileMiddleware, init func(*GinFileMiddleware), assign func(*GinFileMiddleware, *ProvisionedHost)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*GinFileMiddleware) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.ProvisionedHost(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.gin_file_middleware_gin_file_middleware_to_provisioned_host + if fk == nil { + return fmt.Errorf(`foreign-key "gin_file_middleware_gin_file_middleware_to_provisioned_host" is nil for node %v`, n.ID) } - query.withFKs = true - query.Where(predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.InValues(ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "gin_file_middleware_gin_file_middleware_to_provisioned_host" returned %v for node %v`, *fk, n.ID) } - for _, n := range neighbors { - fk := n.gin_file_middleware_gin_file_middleware_to_provisioning_step - if fk == nil { - return nil, fmt.Errorf(`foreign-key "gin_file_middleware_gin_file_middleware_to_provisioning_step" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "gin_file_middleware_gin_file_middleware_to_provisioning_step" returned %v for node %v`, *fk, n.ID) - } - node.Edges.GinFileMiddlewareToProvisioningStep = n + assign(node, n) + } + return nil +} +func (gfmq *GinFileMiddlewareQuery) loadGinFileMiddlewareToProvisioningStep(ctx context.Context, query *ProvisioningStepQuery, nodes []*GinFileMiddleware, init func(*GinFileMiddleware), assign func(*GinFileMiddleware, *ProvisioningStep)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*GinFileMiddleware) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.ProvisioningStep(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.gin_file_middleware_gin_file_middleware_to_provisioning_step + if fk == nil { + return fmt.Errorf(`foreign-key "gin_file_middleware_gin_file_middleware_to_provisioning_step" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "gin_file_middleware_gin_file_middleware_to_provisioning_step" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil } func (gfmq *GinFileMiddlewareQuery) 
sqlCount(ctx context.Context) (int, error) { _spec := gfmq.querySpec() - _spec.Node.Columns = gfmq.fields - if len(gfmq.fields) > 0 { - _spec.Unique = gfmq.unique != nil && *gfmq.unique + if len(gfmq.modifiers) > 0 { + _spec.Modifiers = gfmq.modifiers } - return sqlgraph.CountNodes(ctx, gfmq.driver, _spec) -} - -func (gfmq *GinFileMiddlewareQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := gfmq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = gfmq.ctx.Fields + if len(gfmq.ctx.Fields) > 0 { + _spec.Unique = gfmq.ctx.Unique != nil && *gfmq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, gfmq.driver, _spec) } func (gfmq *GinFileMiddlewareQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: ginfilemiddleware.Table, - Columns: ginfilemiddleware.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, - }, - From: gfmq.sql, - Unique: true, - } - if unique := gfmq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(ginfilemiddleware.Table, ginfilemiddleware.Columns, sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID)) + _spec.From = gfmq.sql + if unique := gfmq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if gfmq.path != nil { + _spec.Unique = true } - if fields := gfmq.fields; len(fields) > 0 { + if fields := gfmq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, ginfilemiddleware.FieldID) for i := range fields { @@ -526,10 +553,10 @@ func (gfmq *GinFileMiddlewareQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := gfmq.limit; limit != nil { + if limit := gfmq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := gfmq.offset; offset != nil { + if offset := gfmq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := gfmq.order; len(ps) > 0 { @@ -545,7 +572,7 @@ func (gfmq *GinFileMiddlewareQuery) querySpec() *sqlgraph.QuerySpec { func (gfmq *GinFileMiddlewareQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(gfmq.driver.Dialect()) t1 := builder.Table(ginfilemiddleware.Table) - columns := gfmq.fields + columns := gfmq.ctx.Fields if len(columns) == 0 { columns = ginfilemiddleware.Columns } @@ -554,7 +581,7 @@ func (gfmq *GinFileMiddlewareQuery) sqlQuery(ctx context.Context) *sql.Selector selector = gfmq.sql selector.Select(selector.Columns(columns...)...) } - if gfmq.unique != nil && *gfmq.unique { + if gfmq.ctx.Unique != nil && *gfmq.ctx.Unique { selector.Distinct() } for _, p := range gfmq.predicates { @@ -563,12 +590,12 @@ func (gfmq *GinFileMiddlewareQuery) sqlQuery(ctx context.Context) *sql.Selector for _, p := range gfmq.order { p(selector) } - if offset := gfmq.offset; offset != nil { + if offset := gfmq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := gfmq.limit; limit != nil { + if limit := gfmq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -576,12 +603,8 @@ func (gfmq *GinFileMiddlewareQuery) sqlQuery(ctx context.Context) *sql.Selector // GinFileMiddlewareGroupBy is the group-by builder for GinFileMiddleware entities. type GinFileMiddlewareGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). 
- sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *GinFileMiddlewareQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -590,471 +613,77 @@ func (gfmgb *GinFileMiddlewareGroupBy) Aggregate(fns ...AggregateFunc) *GinFileM return gfmgb } -// Scan applies the group-by query and scans the result into the given value. -func (gfmgb *GinFileMiddlewareGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := gfmgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (gfmgb *GinFileMiddlewareGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, gfmgb.build.ctx, "GroupBy") + if err := gfmgb.build.prepareQuery(ctx); err != nil { return err } - gfmgb.sql = query - return gfmgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := gfmgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(gfmgb.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := gfmgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) StringsX(ctx context.Context) []string { - v, err := gfmgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = gfmgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) StringX(ctx context.Context) string { - v, err := gfmgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(gfmgb.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := gfmgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) IntsX(ctx context.Context) []int { - v, err := gfmgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (gfmgb *GinFileMiddlewareGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = gfmgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) IntX(ctx context.Context) int { - v, err := gfmgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(gfmgb.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := gfmgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := gfmgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = gfmgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) Float64X(ctx context.Context) float64 { - v, err := gfmgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(gfmgb.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := gfmgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (gfmgb *GinFileMiddlewareGroupBy) BoolsX(ctx context.Context) []bool { - v, err := gfmgb.Bools(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*GinFileMiddlewareQuery, *GinFileMiddlewareGroupBy](ctx, gfmgb.build, gfmgb, gfmgb.build.inters, v) } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (gfmgb *GinFileMiddlewareGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = gfmgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. 
-func (gfmgb *GinFileMiddlewareGroupBy) BoolX(ctx context.Context) bool { - v, err := gfmgb.Bool(ctx) - if err != nil { - panic(err) +func (gfmgb *GinFileMiddlewareGroupBy) sqlScan(ctx context.Context, root *GinFileMiddlewareQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(gfmgb.fns)) + for _, fn := range gfmgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (gfmgb *GinFileMiddlewareGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range gfmgb.fields { - if !ginfilemiddleware.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*gfmgb.flds)+len(gfmgb.fns)) + for _, f := range *gfmgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := gfmgb.sqlQuery() + selector.GroupBy(selector.Columns(*gfmgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := gfmgb.driver.Query(ctx, query, args, rows); err != nil { + if err := gfmgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (gfmgb *GinFileMiddlewareGroupBy) sqlQuery() *sql.Selector { - selector := gfmgb.sql.Select() - aggregation := make([]string, 0, len(gfmgb.fns)) - for _, fn := range gfmgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(gfmgb.fields)+len(gfmgb.fns)) - for _, f := range gfmgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(gfmgb.fields...)...) -} - // GinFileMiddlewareSelect is the builder for selecting fields of GinFileMiddleware entities. type GinFileMiddlewareSelect struct { *GinFileMiddlewareQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (gfms *GinFileMiddlewareSelect) Aggregate(fns ...AggregateFunc) *GinFileMiddlewareSelect { + gfms.fns = append(gfms.fns, fns...) + return gfms } // Scan applies the selector query and scans the result into the given value. -func (gfms *GinFileMiddlewareSelect) Scan(ctx context.Context, v interface{}) error { +func (gfms *GinFileMiddlewareSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, gfms.ctx, "Select") if err := gfms.prepareQuery(ctx); err != nil { return err } - gfms.sql = gfms.GinFileMiddlewareQuery.sqlQuery(ctx) - return gfms.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) ScanX(ctx context.Context, v interface{}) { - if err := gfms.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. 
-func (gfms *GinFileMiddlewareSelect) Strings(ctx context.Context) ([]string, error) { - if len(gfms.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := gfms.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) StringsX(ctx context.Context) []string { - v, err := gfms.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*GinFileMiddlewareQuery, *GinFileMiddlewareSelect](ctx, gfms.GinFileMiddlewareQuery, gfms, gfms.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (gfms *GinFileMiddlewareSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = gfms.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) StringX(ctx context.Context) string { - v, err := gfms.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (gfms *GinFileMiddlewareSelect) Ints(ctx context.Context) ([]int, error) { - if len(gfms.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := gfms.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) IntsX(ctx context.Context) []int { - v, err := gfms.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (gfms *GinFileMiddlewareSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = gfms.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) IntX(ctx context.Context) int { - v, err := gfms.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (gfms *GinFileMiddlewareSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(gfms.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := gfms.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) Float64sX(ctx context.Context) []float64 { - v, err := gfms.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (gfms *GinFileMiddlewareSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = gfms.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) Float64X(ctx context.Context) float64 { - v, err := gfms.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (gfms *GinFileMiddlewareSelect) Bools(ctx context.Context) ([]bool, error) { - if len(gfms.fields) > 1 { - return nil, errors.New("ent: GinFileMiddlewareSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := gfms.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) BoolsX(ctx context.Context) []bool { - v, err := gfms.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (gfms *GinFileMiddlewareSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = gfms.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{ginfilemiddleware.Label} - default: - err = fmt.Errorf("ent: GinFileMiddlewareSelect.Bools returned %d results when one was expected", len(v)) +func (gfms *GinFileMiddlewareSelect) sqlScan(ctx context.Context, root *GinFileMiddlewareQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(gfms.fns)) + for _, fn := range gfms.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (gfms *GinFileMiddlewareSelect) BoolX(ctx context.Context) bool { - v, err := gfms.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*gfms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (gfms *GinFileMiddlewareSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := gfms.sql.Query() + query, args := selector.Query() if err := gfms.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/ginfilemiddleware_update.go b/ent/ginfilemiddleware_update.go index 0ca040a3..2290fb49 100755 --- a/ent/ginfilemiddleware_update.go +++ b/ent/ginfilemiddleware_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -36,12 +36,28 @@ func (gfmu *GinFileMiddlewareUpdate) SetURLID(s string) *GinFileMiddlewareUpdate return gfmu } +// SetNillableURLID sets the "url_id" field if the given value is not nil. +func (gfmu *GinFileMiddlewareUpdate) SetNillableURLID(s *string) *GinFileMiddlewareUpdate { + if s != nil { + gfmu.SetURLID(*s) + } + return gfmu +} + // SetFilePath sets the "file_path" field. 
func (gfmu *GinFileMiddlewareUpdate) SetFilePath(s string) *GinFileMiddlewareUpdate { gfmu.mutation.SetFilePath(s) return gfmu } +// SetNillableFilePath sets the "file_path" field if the given value is not nil. +func (gfmu *GinFileMiddlewareUpdate) SetNillableFilePath(s *string) *GinFileMiddlewareUpdate { + if s != nil { + gfmu.SetFilePath(*s) + } + return gfmu +} + // SetAccessed sets the "accessed" field. func (gfmu *GinFileMiddlewareUpdate) SetAccessed(b bool) *GinFileMiddlewareUpdate { gfmu.mutation.SetAccessed(b) @@ -113,34 +129,7 @@ func (gfmu *GinFileMiddlewareUpdate) ClearGinFileMiddlewareToProvisioningStep() // Save executes the query and returns the number of nodes affected by the update operation. func (gfmu *GinFileMiddlewareUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(gfmu.hooks) == 0 { - affected, err = gfmu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*GinFileMiddlewareMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - gfmu.mutation = mutation - affected, err = gfmu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(gfmu.hooks) - 1; i >= 0; i-- { - if gfmu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = gfmu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, gfmu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, gfmu.sqlSave, gfmu.mutation, gfmu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -166,16 +155,7 @@ func (gfmu *GinFileMiddlewareUpdate) ExecX(ctx context.Context) { } func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: ginfilemiddleware.Table, - Columns: ginfilemiddleware.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(ginfilemiddleware.Table, ginfilemiddleware.Columns, sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID)) if ps := gfmu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -184,25 +164,13 @@ func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err er } } if value, ok := gfmu.mutation.URLID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ginfilemiddleware.FieldURLID, - }) + _spec.SetField(ginfilemiddleware.FieldURLID, field.TypeString, value) } if value, ok := gfmu.mutation.FilePath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ginfilemiddleware.FieldFilePath, - }) + _spec.SetField(ginfilemiddleware.FieldFilePath, field.TypeString, value) } if value, ok := gfmu.mutation.Accessed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: ginfilemiddleware.FieldAccessed, - }) + _spec.SetField(ginfilemiddleware.FieldAccessed, field.TypeBool, value) } if gfmu.mutation.GinFileMiddlewareToProvisionedHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -212,10 +180,7 @@ func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -228,10 +193,7 @@ func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -247,10 +209,7 @@ func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -263,10 +222,7 @@ func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -278,10 +234,11 @@ func (gfmu *GinFileMiddlewareUpdate) sqlSave(ctx context.Context) (n int, err er if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{ginfilemiddleware.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + gfmu.mutation.done = true return n, nil } @@ -299,12 +256,28 @@ func (gfmuo *GinFileMiddlewareUpdateOne) SetURLID(s string) *GinFileMiddlewareUp return gfmuo } +// SetNillableURLID sets the "url_id" field if the given value is not nil. +func (gfmuo *GinFileMiddlewareUpdateOne) SetNillableURLID(s *string) *GinFileMiddlewareUpdateOne { + if s != nil { + gfmuo.SetURLID(*s) + } + return gfmuo +} + // SetFilePath sets the "file_path" field. func (gfmuo *GinFileMiddlewareUpdateOne) SetFilePath(s string) *GinFileMiddlewareUpdateOne { gfmuo.mutation.SetFilePath(s) return gfmuo } +// SetNillableFilePath sets the "file_path" field if the given value is not nil. +func (gfmuo *GinFileMiddlewareUpdateOne) SetNillableFilePath(s *string) *GinFileMiddlewareUpdateOne { + if s != nil { + gfmuo.SetFilePath(*s) + } + return gfmuo +} + // SetAccessed sets the "accessed" field. func (gfmuo *GinFileMiddlewareUpdateOne) SetAccessed(b bool) *GinFileMiddlewareUpdateOne { gfmuo.mutation.SetAccessed(b) @@ -374,6 +347,12 @@ func (gfmuo *GinFileMiddlewareUpdateOne) ClearGinFileMiddlewareToProvisioningSte return gfmuo } +// Where appends a list predicates to the GinFileMiddlewareUpdate builder. +func (gfmuo *GinFileMiddlewareUpdateOne) Where(ps ...predicate.GinFileMiddleware) *GinFileMiddlewareUpdateOne { + gfmuo.mutation.Where(ps...) + return gfmuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (gfmuo *GinFileMiddlewareUpdateOne) Select(field string, fields ...string) *GinFileMiddlewareUpdateOne { @@ -383,34 +362,7 @@ func (gfmuo *GinFileMiddlewareUpdateOne) Select(field string, fields ...string) // Save executes the query and returns the updated GinFileMiddleware entity. func (gfmuo *GinFileMiddlewareUpdateOne) Save(ctx context.Context) (*GinFileMiddleware, error) { - var ( - err error - node *GinFileMiddleware - ) - if len(gfmuo.hooks) == 0 { - node, err = gfmuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*GinFileMiddlewareMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - gfmuo.mutation = mutation - node, err = gfmuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(gfmuo.hooks) - 1; i >= 0; i-- { - if gfmuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = gfmuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, gfmuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, gfmuo.sqlSave, gfmuo.mutation, gfmuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -436,16 +388,7 @@ func (gfmuo *GinFileMiddlewareUpdateOne) ExecX(ctx context.Context) { } func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx context.Context) (_node *GinFileMiddleware, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: ginfilemiddleware.Table, - Columns: ginfilemiddleware.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(ginfilemiddleware.Table, ginfilemiddleware.Columns, sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID)) id, ok := gfmuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "GinFileMiddleware.id" for update`)} @@ -471,25 +414,13 @@ func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx context.Context) (_node *Gi } } if value, ok := gfmuo.mutation.URLID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ginfilemiddleware.FieldURLID, - }) + _spec.SetField(ginfilemiddleware.FieldURLID, field.TypeString, value) } if value, ok := gfmuo.mutation.FilePath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: ginfilemiddleware.FieldFilePath, - }) + _spec.SetField(ginfilemiddleware.FieldFilePath, field.TypeString, value) } if value, ok := gfmuo.mutation.Accessed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: ginfilemiddleware.FieldAccessed, - }) + _spec.SetField(ginfilemiddleware.FieldAccessed, field.TypeBool, value) } if gfmuo.mutation.GinFileMiddlewareToProvisionedHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -499,10 +430,7 @@ func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx context.Context) (_node *Gi Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -515,10 +443,7 @@ func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx 
context.Context) (_node *Gi Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -534,10 +459,7 @@ func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx context.Context) (_node *Gi Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -550,10 +472,7 @@ func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx context.Context) (_node *Gi Columns: []string{ginfilemiddleware.GinFileMiddlewareToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -568,9 +487,10 @@ func (gfmuo *GinFileMiddlewareUpdateOne) sqlSave(ctx context.Context) (_node *Gi if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{ginfilemiddleware.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + gfmuo.mutation.done = true return _node, nil } diff --git a/ent/gql_collection.go b/ent/gql_collection.go index 4fd00efb..c375675e 100644 --- a/ent/gql_collection.go +++ b/ent/gql_collection.go @@ -1,453 +1,4996 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent import ( "context" + "entgo.io/ent/dialect/sql" "github.com/99designs/gqlgen/graphql" + "github.com/gen0cide/laforge/ent/agentstatus" + "github.com/gen0cide/laforge/ent/agenttask" + "github.com/gen0cide/laforge/ent/ansible" + "github.com/gen0cide/laforge/ent/authuser" + "github.com/gen0cide/laforge/ent/build" + "github.com/gen0cide/laforge/ent/buildcommit" + "github.com/gen0cide/laforge/ent/command" + "github.com/gen0cide/laforge/ent/competition" + "github.com/gen0cide/laforge/ent/disk" + "github.com/gen0cide/laforge/ent/dns" + "github.com/gen0cide/laforge/ent/dnsrecord" + "github.com/gen0cide/laforge/ent/environment" + "github.com/gen0cide/laforge/ent/filedelete" + "github.com/gen0cide/laforge/ent/filedownload" + "github.com/gen0cide/laforge/ent/fileextract" + "github.com/gen0cide/laforge/ent/finding" + "github.com/gen0cide/laforge/ent/ginfilemiddleware" + "github.com/gen0cide/laforge/ent/host" + "github.com/gen0cide/laforge/ent/hostdependency" + "github.com/gen0cide/laforge/ent/identity" + "github.com/gen0cide/laforge/ent/includednetwork" + "github.com/gen0cide/laforge/ent/network" + "github.com/gen0cide/laforge/ent/plan" + "github.com/gen0cide/laforge/ent/plandiff" + "github.com/gen0cide/laforge/ent/provisionedhost" + "github.com/gen0cide/laforge/ent/provisionednetwork" + "github.com/gen0cide/laforge/ent/provisioningstep" + "github.com/gen0cide/laforge/ent/repocommit" + "github.com/gen0cide/laforge/ent/repository" + "github.com/gen0cide/laforge/ent/script" + "github.com/gen0cide/laforge/ent/servertask" + "github.com/gen0cide/laforge/ent/status" + "github.com/gen0cide/laforge/ent/tag" + "github.com/gen0cide/laforge/ent/team" + "github.com/gen0cide/laforge/ent/token" + "github.com/gen0cide/laforge/ent/user" ) // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (ap *AdhocPlanQuery) CollectFields(ctx context.Context, satisfies ...string) *AdhocPlanQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - ap = ap.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (ap *AdhocPlanQuery) CollectFields(ctx context.Context, satisfies ...string) (*AdhocPlanQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return ap, nil } - return ap + if err := ap.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return ap, nil +} + +func (ap *AdhocPlanQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "prevadhocplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AdhocPlanClient{config: ap.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ap.WithNamedPrevAdhocPlan(alias, func(wq *AdhocPlanQuery) { + *wq = *query + }) + case "nextadhocplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AdhocPlanClient{config: ap.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ap.WithNamedNextAdhocPlan(alias, func(wq *AdhocPlanQuery) { + *wq = *query + }) + case "adhocplantobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: ap.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ap.withAdhocPlanToBuild = query + case "adhocplantostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: ap.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ap.withAdhocPlanToStatus = query + case "adhocplantoagenttask": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AgentTaskClient{config: ap.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ap.withAdhocPlanToAgentTask = query + } + } + return nil } -func (ap *AdhocPlanQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *AdhocPlanQuery { - return ap +type adhocplanPaginateArgs struct { + first, last *int + after, before *Cursor + opts []AdhocPlanPaginateOption +} + +func newAdhocPlanPaginateArgs(rv map[string]any) *adhocplanPaginateArgs { + args := &adhocplanPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (as *AgentStatusQuery) CollectFields(ctx context.Context, satisfies ...string) *AgentStatusQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - as = as.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (as *AgentStatusQuery) CollectFields(ctx context.Context, satisfies ...string) (*AgentStatusQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return as, nil + } + if err := as.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return as + return as, nil } -func (as *AgentStatusQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *AgentStatusQuery { - return as +func (as *AgentStatusQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(agentstatus.Columns)) + selectedFields = []string{agentstatus.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "agentstatustoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: as.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + as.withAgentStatusToProvisionedHost = query + case "agentstatustoprovisionednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedNetworkClient{config: as.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + as.withAgentStatusToProvisionedNetwork = query + case "agentstatustobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: as.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + as.withAgentStatusToBuild = query + case "clientid": + if _, ok := fieldSeen[agentstatus.FieldClientID]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldClientID) + fieldSeen[agentstatus.FieldClientID] = struct{}{} + } + case "hostname": + if _, ok := fieldSeen[agentstatus.FieldHostname]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldHostname) + fieldSeen[agentstatus.FieldHostname] = struct{}{} + } + case "uptime": + if _, ok := fieldSeen[agentstatus.FieldUpTime]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldUpTime) + fieldSeen[agentstatus.FieldUpTime] = struct{}{} + } + case "boottime": + if _, ok := fieldSeen[agentstatus.FieldBootTime]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldBootTime) + fieldSeen[agentstatus.FieldBootTime] = struct{}{} + } + case "numprocs": + if _, ok := fieldSeen[agentstatus.FieldNumProcs]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldNumProcs) + fieldSeen[agentstatus.FieldNumProcs] = struct{}{} + } + case "os": + if _, ok := fieldSeen[agentstatus.FieldOs]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldOs) + fieldSeen[agentstatus.FieldOs] = struct{}{} + } + case "hostid": + if _, ok := fieldSeen[agentstatus.FieldHostID]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldHostID) + fieldSeen[agentstatus.FieldHostID] = struct{}{} + } + case "load1": + if _, ok := fieldSeen[agentstatus.FieldLoad1]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldLoad1) + fieldSeen[agentstatus.FieldLoad1] = struct{}{} + } + case "load5": + if _, ok := fieldSeen[agentstatus.FieldLoad5]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldLoad5) + fieldSeen[agentstatus.FieldLoad5] = struct{}{} + } + case "load15": + if _, ok := fieldSeen[agentstatus.FieldLoad15]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldLoad15) + fieldSeen[agentstatus.FieldLoad15] = struct{}{} + } + case "totalmem": + if _, ok := fieldSeen[agentstatus.FieldTotalMem]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldTotalMem) + fieldSeen[agentstatus.FieldTotalMem] = struct{}{} + } + case "freemem": + if _, ok := fieldSeen[agentstatus.FieldFreeMem]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldFreeMem) + fieldSeen[agentstatus.FieldFreeMem] = struct{}{} + } + case "usedmem": + if _, ok := 
fieldSeen[agentstatus.FieldUsedMem]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldUsedMem) + fieldSeen[agentstatus.FieldUsedMem] = struct{}{} + } + case "timestamp": + if _, ok := fieldSeen[agentstatus.FieldTimestamp]; !ok { + selectedFields = append(selectedFields, agentstatus.FieldTimestamp) + fieldSeen[agentstatus.FieldTimestamp] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + as.Select(selectedFields...) + } + return nil +} + +type agentstatusPaginateArgs struct { + first, last *int + after, before *Cursor + opts []AgentStatusPaginateOption +} + +func newAgentStatusPaginateArgs(rv map[string]any) *agentstatusPaginateArgs { + args := &agentstatusPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (at *AgentTaskQuery) CollectFields(ctx context.Context, satisfies ...string) *AgentTaskQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - at = at.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (at *AgentTaskQuery) CollectFields(ctx context.Context, satisfies ...string) (*AgentTaskQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return at, nil + } + if err := at.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return at + return at, nil +} + +func (at *AgentTaskQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(agenttask.Columns)) + selectedFields = []string{agenttask.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "agenttasktoprovisioningstep": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisioningStepClient{config: at.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + at.withAgentTaskToProvisioningStep = query + case "agenttasktoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: at.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + at.withAgentTaskToProvisionedHost = query + case "agenttasktoadhocplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AdhocPlanClient{config: at.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + at.WithNamedAgentTaskToAdhocPlan(alias, func(wq *AdhocPlanQuery) { + *wq = *query + }) + case "command": + if _, ok := fieldSeen[agenttask.FieldCommand]; !ok { + selectedFields = append(selectedFields, agenttask.FieldCommand) + fieldSeen[agenttask.FieldCommand] = struct{}{} + } + case "args": + if _, ok := fieldSeen[agenttask.FieldArgs]; !ok { + selectedFields = append(selectedFields, agenttask.FieldArgs) + fieldSeen[agenttask.FieldArgs] = struct{}{} + } + case "number": + if _, ok := fieldSeen[agenttask.FieldNumber]; !ok { + selectedFields = append(selectedFields, agenttask.FieldNumber) + fieldSeen[agenttask.FieldNumber] = struct{}{} + } + case "output": + if _, ok := fieldSeen[agenttask.FieldOutput]; !ok { + selectedFields = append(selectedFields, agenttask.FieldOutput) + fieldSeen[agenttask.FieldOutput] = struct{}{} + } + case "state": + if _, ok := fieldSeen[agenttask.FieldState]; !ok { + selectedFields = append(selectedFields, agenttask.FieldState) + fieldSeen[agenttask.FieldState] = struct{}{} + } + case "errorMessage": + if _, ok := fieldSeen[agenttask.FieldErrorMessage]; !ok { + selectedFields = append(selectedFields, agenttask.FieldErrorMessage) + fieldSeen[agenttask.FieldErrorMessage] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + at.Select(selectedFields...) + } + return nil +} + +type agenttaskPaginateArgs struct { + first, last *int + after, before *Cursor + opts []AgentTaskPaginateOption } -func (at *AgentTaskQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *AgentTaskQuery { - return at +func newAgentTaskPaginateArgs(rv map[string]any) *agenttaskPaginateArgs { + args := &agenttaskPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (a *AnsibleQuery) CollectFields(ctx context.Context, satisfies ...string) *AnsibleQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - a = a.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (a *AnsibleQuery) CollectFields(ctx context.Context, satisfies ...string) (*AnsibleQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return a, nil } - return a + if err := a.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return a, nil } -func (a *AnsibleQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *AnsibleQuery { - return a +func (a *AnsibleQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(ansible.Columns)) + selectedFields = []string{ansible.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "ansibletouser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&UserClient{config: a.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + a.WithNamedAnsibleToUser(alias, func(wq *UserQuery) { + *wq = *query + }) + case "ansiblefromenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: a.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + a.withAnsibleFromEnvironment = query + case "name": + if _, ok := fieldSeen[ansible.FieldName]; !ok { + selectedFields = append(selectedFields, ansible.FieldName) + fieldSeen[ansible.FieldName] = struct{}{} + } + case "hclID": + if _, ok := fieldSeen[ansible.FieldHCLID]; !ok { + selectedFields = append(selectedFields, ansible.FieldHCLID) + fieldSeen[ansible.FieldHCLID] = struct{}{} + } + case "description": + if _, ok := fieldSeen[ansible.FieldDescription]; !ok { + selectedFields = append(selectedFields, ansible.FieldDescription) + fieldSeen[ansible.FieldDescription] = struct{}{} + } + case "source": + if _, ok := fieldSeen[ansible.FieldSource]; !ok { + selectedFields = append(selectedFields, ansible.FieldSource) + fieldSeen[ansible.FieldSource] = struct{}{} + } + case "playbookName": + if _, ok := fieldSeen[ansible.FieldPlaybookName]; !ok { + selectedFields = append(selectedFields, ansible.FieldPlaybookName) + fieldSeen[ansible.FieldPlaybookName] = struct{}{} + } + case "method": + if _, ok := fieldSeen[ansible.FieldMethod]; !ok { + selectedFields = append(selectedFields, ansible.FieldMethod) + fieldSeen[ansible.FieldMethod] = struct{}{} + } + case "inventory": + if _, ok := fieldSeen[ansible.FieldInventory]; !ok { + selectedFields = append(selectedFields, ansible.FieldInventory) + fieldSeen[ansible.FieldInventory] = struct{}{} + } + case "absPath": + if _, ok := fieldSeen[ansible.FieldAbsPath]; !ok { + selectedFields = append(selectedFields, ansible.FieldAbsPath) + fieldSeen[ansible.FieldAbsPath] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[ansible.FieldTags]; !ok { + selectedFields = append(selectedFields, ansible.FieldTags) + fieldSeen[ansible.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + a.Select(selectedFields...) 
+ } + return nil +} + +type ansiblePaginateArgs struct { + first, last *int + after, before *Cursor + opts []AnsiblePaginateOption +} + +func newAnsiblePaginateArgs(rv map[string]any) *ansiblePaginateArgs { + args := &ansiblePaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (au *AuthUserQuery) CollectFields(ctx context.Context, satisfies ...string) *AuthUserQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - au = au.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (au *AuthUserQuery) CollectFields(ctx context.Context, satisfies ...string) (*AuthUserQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return au, nil + } + if err := au.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return au, nil +} + +func (au *AuthUserQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(authuser.Columns)) + selectedFields = []string{authuser.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "authusertotoken": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TokenClient{config: au.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + au.WithNamedAuthUserToToken(alias, func(wq *TokenQuery) { + *wq = *query + }) + case "authusertoservertasks": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ServerTaskClient{config: au.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + au.WithNamedAuthUserToServerTasks(alias, func(wq *ServerTaskQuery) { + *wq = *query + }) + case "username": + if _, ok := fieldSeen[authuser.FieldUsername]; !ok { + selectedFields = append(selectedFields, authuser.FieldUsername) + fieldSeen[authuser.FieldUsername] = struct{}{} + } + case "firstName": + if _, ok := fieldSeen[authuser.FieldFirstName]; !ok { + selectedFields = append(selectedFields, authuser.FieldFirstName) + fieldSeen[authuser.FieldFirstName] = struct{}{} + } + case "lastName": + if _, ok := fieldSeen[authuser.FieldLastName]; !ok { + selectedFields = append(selectedFields, authuser.FieldLastName) + fieldSeen[authuser.FieldLastName] = struct{}{} + } + case "email": + if _, ok := fieldSeen[authuser.FieldEmail]; !ok { + selectedFields = append(selectedFields, authuser.FieldEmail) + fieldSeen[authuser.FieldEmail] = struct{}{} + } + case "phone": + if _, ok := fieldSeen[authuser.FieldPhone]; !ok { + selectedFields = append(selectedFields, authuser.FieldPhone) + fieldSeen[authuser.FieldPhone] = struct{}{} + } + case "company": + if _, ok := fieldSeen[authuser.FieldCompany]; !ok { + selectedFields = append(selectedFields, authuser.FieldCompany) + fieldSeen[authuser.FieldCompany] = struct{}{} + } + case "occupation": + if _, ok := fieldSeen[authuser.FieldOccupation]; 
!ok { + selectedFields = append(selectedFields, authuser.FieldOccupation) + fieldSeen[authuser.FieldOccupation] = struct{}{} + } + case "privateKeyPath": + if _, ok := fieldSeen[authuser.FieldPrivateKeyPath]; !ok { + selectedFields = append(selectedFields, authuser.FieldPrivateKeyPath) + fieldSeen[authuser.FieldPrivateKeyPath] = struct{}{} + } + case "role": + if _, ok := fieldSeen[authuser.FieldRole]; !ok { + selectedFields = append(selectedFields, authuser.FieldRole) + fieldSeen[authuser.FieldRole] = struct{}{} + } + case "provider": + if _, ok := fieldSeen[authuser.FieldProvider]; !ok { + selectedFields = append(selectedFields, authuser.FieldProvider) + fieldSeen[authuser.FieldProvider] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return au + if !unknownSeen { + au.Select(selectedFields...) + } + return nil +} + +type authuserPaginateArgs struct { + first, last *int + after, before *Cursor + opts []AuthUserPaginateOption } -func (au *AuthUserQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *AuthUserQuery { - return au +func newAuthUserPaginateArgs(rv map[string]any) *authuserPaginateArgs { + args := &authuserPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (b *BuildQuery) CollectFields(ctx context.Context, satisfies ...string) *BuildQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - b = b.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (b *BuildQuery) CollectFields(ctx context.Context, satisfies ...string) (*BuildQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return b, nil + } + if err := b.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return b, nil +} + +func (b *BuildQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(build.Columns)) + selectedFields = []string{build.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "buildtostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.withBuildToStatus = query + case "buildtoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.withBuildToEnvironment = query + case "buildtocompetition": + var ( + alias = field.Alias + path = append(path, alias) + query = (&CompetitionClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.withBuildToCompetition = query + case "buildtolatestbuildcommit": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildCommitClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.withBuildToLatestBuildCommit = query + case "buildtorepocommit": + var ( + alias = field.Alias + path = append(path, alias) + query = (&RepoCommitClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.withBuildToRepoCommit = query + case "buildtoprovisionednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedNetworkClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.WithNamedBuildToProvisionedNetwork(alias, func(wq *ProvisionedNetworkQuery) { + *wq = *query + }) + case "buildtoteam": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TeamClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.WithNamedBuildToTeam(alias, func(wq *TeamQuery) { + *wq = *query + }) + case "buildtoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.WithNamedBuildToPlan(alias, func(wq *PlanQuery) { + *wq = *query + }) + case "buildtobuildcommits": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildCommitClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.WithNamedBuildToBuildCommits(alias, func(wq *BuildCommitQuery) { + *wq = *query + }) + case "buildtoadhocplans": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AdhocPlanClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.WithNamedBuildToAdhocPlans(alias, func(wq *AdhocPlanQuery) { + *wq = *query + }) + case "buildtoagentstatuses": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AgentStatusClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil 
{ + return err + } + b.WithNamedBuildToAgentStatuses(alias, func(wq *AgentStatusQuery) { + *wq = *query + }) + case "buildtoservertasks": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ServerTaskClient{config: b.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + b.WithNamedBuildToServerTasks(alias, func(wq *ServerTaskQuery) { + *wq = *query + }) + case "revision": + if _, ok := fieldSeen[build.FieldRevision]; !ok { + selectedFields = append(selectedFields, build.FieldRevision) + fieldSeen[build.FieldRevision] = struct{}{} + } + case "environmentRevision": + if _, ok := fieldSeen[build.FieldEnvironmentRevision]; !ok { + selectedFields = append(selectedFields, build.FieldEnvironmentRevision) + fieldSeen[build.FieldEnvironmentRevision] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[build.FieldVars]; !ok { + selectedFields = append(selectedFields, build.FieldVars) + fieldSeen[build.FieldVars] = struct{}{} + } + case "completedPlan": + if _, ok := fieldSeen[build.FieldCompletedPlan]; !ok { + selectedFields = append(selectedFields, build.FieldCompletedPlan) + fieldSeen[build.FieldCompletedPlan] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + b.Select(selectedFields...) } - return b + return nil } -func (b *BuildQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *BuildQuery { - return b +type buildPaginateArgs struct { + first, last *int + after, before *Cursor + opts []BuildPaginateOption +} + +func newBuildPaginateArgs(rv map[string]any) *buildPaginateArgs { + args := &buildPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (bc *BuildCommitQuery) CollectFields(ctx context.Context, satisfies ...string) *BuildCommitQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - bc = bc.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (bc *BuildCommitQuery) CollectFields(ctx context.Context, satisfies ...string) (*BuildCommitQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return bc, nil + } + if err := bc.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return bc + return bc, nil } -func (bc *BuildCommitQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *BuildCommitQuery { - return bc +func (bc *BuildCommitQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(buildcommit.Columns)) + selectedFields = []string{buildcommit.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "buildcommittobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: bc.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + bc.withBuildCommitToBuild = query + case "buildcommittoservertask": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ServerTaskClient{config: bc.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + bc.WithNamedBuildCommitToServerTask(alias, func(wq *ServerTaskQuery) { + *wq = *query + }) + case "buildcommittoplandiffs": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanDiffClient{config: bc.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + bc.WithNamedBuildCommitToPlanDiffs(alias, func(wq *PlanDiffQuery) { + *wq = *query + }) + case "type": + if _, ok := fieldSeen[buildcommit.FieldType]; !ok { + selectedFields = append(selectedFields, buildcommit.FieldType) + fieldSeen[buildcommit.FieldType] = struct{}{} + } + case "revision": + if _, ok := fieldSeen[buildcommit.FieldRevision]; !ok { + selectedFields = append(selectedFields, buildcommit.FieldRevision) + fieldSeen[buildcommit.FieldRevision] = struct{}{} + } + case "state": + if _, ok := fieldSeen[buildcommit.FieldState]; !ok { + selectedFields = append(selectedFields, buildcommit.FieldState) + fieldSeen[buildcommit.FieldState] = struct{}{} + } + case "createdAt": + if _, ok := fieldSeen[buildcommit.FieldCreatedAt]; !ok { + selectedFields = append(selectedFields, buildcommit.FieldCreatedAt) + fieldSeen[buildcommit.FieldCreatedAt] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + bc.Select(selectedFields...) + } + return nil +} + +type buildcommitPaginateArgs struct { + first, last *int + after, before *Cursor + opts []BuildCommitPaginateOption +} + +func newBuildCommitPaginateArgs(rv map[string]any) *buildcommitPaginateArgs { + args := &buildcommitPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (c *CommandQuery) CollectFields(ctx context.Context, satisfies ...string) *CommandQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - c = c.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (c *CommandQuery) CollectFields(ctx context.Context, satisfies ...string) (*CommandQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return c, nil + } + if err := c.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return c, nil +} + +func (c *CommandQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(command.Columns)) + selectedFields = []string{command.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "commandtouser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&UserClient{config: c.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + c.WithNamedCommandToUser(alias, func(wq *UserQuery) { + *wq = *query + }) + case "commandtoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: c.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + c.withCommandToEnvironment = query + case "hclID": + if _, ok := fieldSeen[command.FieldHCLID]; !ok { + selectedFields = append(selectedFields, command.FieldHCLID) + fieldSeen[command.FieldHCLID] = struct{}{} + } + case "name": + if _, ok := fieldSeen[command.FieldName]; !ok { + selectedFields = append(selectedFields, command.FieldName) + fieldSeen[command.FieldName] = struct{}{} + } + case "description": + if _, ok := fieldSeen[command.FieldDescription]; !ok { + selectedFields = append(selectedFields, command.FieldDescription) + fieldSeen[command.FieldDescription] = struct{}{} + } + case "program": + if _, ok := fieldSeen[command.FieldProgram]; !ok { + selectedFields = append(selectedFields, command.FieldProgram) + fieldSeen[command.FieldProgram] = struct{}{} + } + case "args": + if _, ok := fieldSeen[command.FieldArgs]; !ok { + selectedFields = append(selectedFields, command.FieldArgs) + fieldSeen[command.FieldArgs] = struct{}{} + } + case "ignoreErrors": + if _, ok := fieldSeen[command.FieldIgnoreErrors]; !ok { + selectedFields = append(selectedFields, command.FieldIgnoreErrors) + fieldSeen[command.FieldIgnoreErrors] = struct{}{} + } + case "disabled": + if _, ok := fieldSeen[command.FieldDisabled]; !ok { + selectedFields = append(selectedFields, command.FieldDisabled) + fieldSeen[command.FieldDisabled] = struct{}{} + } + case "cooldown": + if _, ok := fieldSeen[command.FieldCooldown]; !ok { + selectedFields = append(selectedFields, command.FieldCooldown) + fieldSeen[command.FieldCooldown] = struct{}{} + } + case "timeout": + if _, ok := fieldSeen[command.FieldTimeout]; !ok { + selectedFields = append(selectedFields, command.FieldTimeout) + fieldSeen[command.FieldTimeout] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[command.FieldVars]; !ok { + selectedFields = append(selectedFields, command.FieldVars) + fieldSeen[command.FieldVars] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[command.FieldTags]; !ok { + selectedFields = append(selectedFields, command.FieldTags) + fieldSeen[command.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return c + if !unknownSeen { + 
c.Select(selectedFields...) + } + return nil +} + +type commandPaginateArgs struct { + first, last *int + after, before *Cursor + opts []CommandPaginateOption } -func (c *CommandQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *CommandQuery { - return c +func newCommandPaginateArgs(rv map[string]any) *commandPaginateArgs { + args := &commandPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (c *CompetitionQuery) CollectFields(ctx context.Context, satisfies ...string) *CompetitionQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - c = c.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (c *CompetitionQuery) CollectFields(ctx context.Context, satisfies ...string) (*CompetitionQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return c, nil } - return c + if err := c.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return c, nil } -func (c *CompetitionQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *CompetitionQuery { - return c +func (c *CompetitionQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(competition.Columns)) + selectedFields = []string{competition.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "competitiontodns": + var ( + alias = field.Alias + path = append(path, alias) + query = (&DNSClient{config: c.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + c.WithNamedCompetitionToDNS(alias, func(wq *DNSQuery) { + *wq = *query + }) + case "competitiontoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: c.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + c.withCompetitionToEnvironment = query + case "competitiontobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: c.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + c.WithNamedCompetitionToBuild(alias, func(wq *BuildQuery) { + *wq = *query + }) + case "hclID": + if _, ok := fieldSeen[competition.FieldHCLID]; !ok { + selectedFields = append(selectedFields, competition.FieldHCLID) + fieldSeen[competition.FieldHCLID] = struct{}{} + } + case "rootPassword": + if _, ok := fieldSeen[competition.FieldRootPassword]; !ok { + selectedFields = append(selectedFields, competition.FieldRootPassword) + fieldSeen[competition.FieldRootPassword] = struct{}{} + } + case "config": + if _, ok := fieldSeen[competition.FieldConfig]; !ok { + selectedFields = append(selectedFields, competition.FieldConfig) + fieldSeen[competition.FieldConfig] = 
struct{}{} + } + case "tags": + if _, ok := fieldSeen[competition.FieldTags]; !ok { + selectedFields = append(selectedFields, competition.FieldTags) + fieldSeen[competition.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + c.Select(selectedFields...) + } + return nil +} + +type competitionPaginateArgs struct { + first, last *int + after, before *Cursor + opts []CompetitionPaginateOption +} + +func newCompetitionPaginateArgs(rv map[string]any) *competitionPaginateArgs { + args := &competitionPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (d *DNSQuery) CollectFields(ctx context.Context, satisfies ...string) *DNSQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - d = d.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (d *DNSQuery) CollectFields(ctx context.Context, satisfies ...string) (*DNSQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return d, nil + } + if err := d.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return d, nil +} + +func (d *DNSQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(dns.Columns)) + selectedFields = []string{dns.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "dnstoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: d.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + d.WithNamedDNSToEnvironment(alias, func(wq *EnvironmentQuery) { + *wq = *query + }) + case "dnstocompetition": + var ( + alias = field.Alias + path = append(path, alias) + query = (&CompetitionClient{config: d.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + d.WithNamedDNSToCompetition(alias, func(wq *CompetitionQuery) { + *wq = *query + }) + case "hclID": + if _, ok := fieldSeen[dns.FieldHCLID]; !ok { + selectedFields = append(selectedFields, dns.FieldHCLID) + fieldSeen[dns.FieldHCLID] = struct{}{} + } + case "type": + if _, ok := fieldSeen[dns.FieldType]; !ok { + selectedFields = append(selectedFields, dns.FieldType) + fieldSeen[dns.FieldType] = struct{}{} + } + case "rootDomain": + if _, ok := fieldSeen[dns.FieldRootDomain]; !ok { + selectedFields = append(selectedFields, dns.FieldRootDomain) + fieldSeen[dns.FieldRootDomain] = struct{}{} + } + case "dnsServers": + if _, ok := fieldSeen[dns.FieldDNSServers]; !ok { + selectedFields = append(selectedFields, dns.FieldDNSServers) + fieldSeen[dns.FieldDNSServers] = struct{}{} + } + case "ntpServers": + if _, ok := fieldSeen[dns.FieldNtpServers]; !ok { + selectedFields = append(selectedFields, dns.FieldNtpServers) + fieldSeen[dns.FieldNtpServers] = struct{}{} + } + case "config": 
+ if _, ok := fieldSeen[dns.FieldConfig]; !ok { + selectedFields = append(selectedFields, dns.FieldConfig) + fieldSeen[dns.FieldConfig] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return d + if !unknownSeen { + d.Select(selectedFields...) + } + return nil } -func (d *DNSQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *DNSQuery { - return d +type dnsPaginateArgs struct { + first, last *int + after, before *Cursor + opts []DNSPaginateOption +} + +func newDNSPaginateArgs(rv map[string]any) *dnsPaginateArgs { + args := &dnsPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (dr *DNSRecordQuery) CollectFields(ctx context.Context, satisfies ...string) *DNSRecordQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - dr = dr.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (dr *DNSRecordQuery) CollectFields(ctx context.Context, satisfies ...string) (*DNSRecordQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return dr, nil + } + if err := dr.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return dr, nil +} + +func (dr *DNSRecordQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(dnsrecord.Columns)) + selectedFields = []string{dnsrecord.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "dnsrecordtoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: dr.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + dr.withDNSRecordToEnvironment = query + case "hclID": + if _, ok := fieldSeen[dnsrecord.FieldHCLID]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldHCLID) + fieldSeen[dnsrecord.FieldHCLID] = struct{}{} + } + case "name": + if _, ok := fieldSeen[dnsrecord.FieldName]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldName) + fieldSeen[dnsrecord.FieldName] = struct{}{} + } + case "values": + if _, ok := fieldSeen[dnsrecord.FieldValues]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldValues) + fieldSeen[dnsrecord.FieldValues] = struct{}{} + } + case "type": + if _, ok := fieldSeen[dnsrecord.FieldType]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldType) + fieldSeen[dnsrecord.FieldType] = struct{}{} + } + case "zone": + if _, ok := fieldSeen[dnsrecord.FieldZone]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldZone) + fieldSeen[dnsrecord.FieldZone] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[dnsrecord.FieldVars]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldVars) + fieldSeen[dnsrecord.FieldVars] = struct{}{} + } + case "disabled": + if _, ok := fieldSeen[dnsrecord.FieldDisabled]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldDisabled) + fieldSeen[dnsrecord.FieldDisabled] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[dnsrecord.FieldTags]; !ok { + selectedFields = append(selectedFields, dnsrecord.FieldTags) + fieldSeen[dnsrecord.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + dr.Select(selectedFields...) } - return dr + return nil } -func (dr *DNSRecordQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *DNSRecordQuery { - return dr +type dnsrecordPaginateArgs struct { + first, last *int + after, before *Cursor + opts []DNSRecordPaginateOption +} + +func newDNSRecordPaginateArgs(rv map[string]any) *dnsrecordPaginateArgs { + args := &dnsrecordPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (d *DiskQuery) CollectFields(ctx context.Context, satisfies ...string) *DiskQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - d = d.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (d *DiskQuery) CollectFields(ctx context.Context, satisfies ...string) (*DiskQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return d, nil } - return d + if err := d.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return d, nil +} + +func (d *DiskQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(disk.Columns)) + selectedFields = []string{disk.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "disktohost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: d.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + d.withDiskToHost = query + case "size": + if _, ok := fieldSeen[disk.FieldSize]; !ok { + selectedFields = append(selectedFields, disk.FieldSize) + fieldSeen[disk.FieldSize] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + d.Select(selectedFields...) + } + return nil +} + +type diskPaginateArgs struct { + first, last *int + after, before *Cursor + opts []DiskPaginateOption } -func (d *DiskQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *DiskQuery { - return d +func newDiskPaginateArgs(rv map[string]any) *diskPaginateArgs { + args := &diskPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (e *EnvironmentQuery) CollectFields(ctx context.Context, satisfies ...string) *EnvironmentQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - e = e.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (e *EnvironmentQuery) CollectFields(ctx context.Context, satisfies ...string) (*EnvironmentQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return e, nil + } + if err := e.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return e, nil +} + +func (e *EnvironmentQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(environment.Columns)) + selectedFields = []string{environment.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "environmenttouser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&UserClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToUser(alias, func(wq *UserQuery) { + *wq = *query + }) + case "environmenttohost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToHost(alias, func(wq *HostQuery) { + *wq = *query + }) + case "environmenttocompetition": + var ( + alias = field.Alias + path = append(path, alias) + query = (&CompetitionClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToCompetition(alias, func(wq *CompetitionQuery) { + *wq = *query + }) + case "environmenttoidentity": + var ( + alias = field.Alias + path = append(path, alias) + query = (&IdentityClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToIdentity(alias, func(wq *IdentityQuery) { + *wq = *query + }) + case "environmenttocommand": + var ( + alias = field.Alias + path = append(path, alias) + query = (&CommandClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToCommand(alias, func(wq *CommandQuery) { + *wq = *query + }) + case "environmenttoscript": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ScriptClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToScript(alias, func(wq *ScriptQuery) { + *wq = *query + }) + case "environmenttofiledownload": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FileDownloadClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToFileDownload(alias, func(wq *FileDownloadQuery) { + *wq = *query + }) + case "environmenttofiledelete": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FileDeleteClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToFileDelete(alias, func(wq *FileDeleteQuery) { + *wq = *query + }) + case "environmenttofileextract": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FileExtractClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToFileExtract(alias, func(wq *FileExtractQuery) { + *wq = *query + }) + case "environmenttoincludednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&IncludedNetworkClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + 
return err + } + e.WithNamedEnvironmentToIncludedNetwork(alias, func(wq *IncludedNetworkQuery) { + *wq = *query + }) + case "environmenttofinding": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FindingClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToFinding(alias, func(wq *FindingQuery) { + *wq = *query + }) + case "environmenttodnsrecord": + var ( + alias = field.Alias + path = append(path, alias) + query = (&DNSRecordClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToDNSRecord(alias, func(wq *DNSRecordQuery) { + *wq = *query + }) + case "environmenttodns": + var ( + alias = field.Alias + path = append(path, alias) + query = (&DNSClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToDNS(alias, func(wq *DNSQuery) { + *wq = *query + }) + case "environmenttonetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&NetworkClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToNetwork(alias, func(wq *NetworkQuery) { + *wq = *query + }) + case "environmenttohostdependency": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostDependencyClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToHostDependency(alias, func(wq *HostDependencyQuery) { + *wq = *query + }) + case "environmenttoansible": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AnsibleClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToAnsible(alias, func(wq *AnsibleQuery) { + *wq = *query + }) + case "environmenttobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToBuild(alias, func(wq *BuildQuery) { + *wq = *query + }) + case "environmenttorepository": + var ( + alias = field.Alias + path = append(path, alias) + query = (&RepositoryClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToRepository(alias, func(wq *RepositoryQuery) { + *wq = *query + }) + case "environmenttoservertask": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ServerTaskClient{config: e.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + e.WithNamedEnvironmentToServerTask(alias, func(wq *ServerTaskQuery) { + *wq = *query + }) + case "hclID": + if _, ok := fieldSeen[environment.FieldHCLID]; !ok { + selectedFields = append(selectedFields, environment.FieldHCLID) + fieldSeen[environment.FieldHCLID] = struct{}{} + } + case "competitionID": + if _, ok := fieldSeen[environment.FieldCompetitionID]; !ok { + selectedFields = append(selectedFields, environment.FieldCompetitionID) + fieldSeen[environment.FieldCompetitionID] 
= struct{}{} + } + case "name": + if _, ok := fieldSeen[environment.FieldName]; !ok { + selectedFields = append(selectedFields, environment.FieldName) + fieldSeen[environment.FieldName] = struct{}{} + } + case "description": + if _, ok := fieldSeen[environment.FieldDescription]; !ok { + selectedFields = append(selectedFields, environment.FieldDescription) + fieldSeen[environment.FieldDescription] = struct{}{} + } + case "builder": + if _, ok := fieldSeen[environment.FieldBuilder]; !ok { + selectedFields = append(selectedFields, environment.FieldBuilder) + fieldSeen[environment.FieldBuilder] = struct{}{} + } + case "teamCount": + if _, ok := fieldSeen[environment.FieldTeamCount]; !ok { + selectedFields = append(selectedFields, environment.FieldTeamCount) + fieldSeen[environment.FieldTeamCount] = struct{}{} + } + case "revision": + if _, ok := fieldSeen[environment.FieldRevision]; !ok { + selectedFields = append(selectedFields, environment.FieldRevision) + fieldSeen[environment.FieldRevision] = struct{}{} + } + case "adminCidrs": + if _, ok := fieldSeen[environment.FieldAdminCidrs]; !ok { + selectedFields = append(selectedFields, environment.FieldAdminCidrs) + fieldSeen[environment.FieldAdminCidrs] = struct{}{} + } + case "exposedVdiPorts": + if _, ok := fieldSeen[environment.FieldExposedVdiPorts]; !ok { + selectedFields = append(selectedFields, environment.FieldExposedVdiPorts) + fieldSeen[environment.FieldExposedVdiPorts] = struct{}{} + } + case "config": + if _, ok := fieldSeen[environment.FieldConfig]; !ok { + selectedFields = append(selectedFields, environment.FieldConfig) + fieldSeen[environment.FieldConfig] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[environment.FieldTags]; !ok { + selectedFields = append(selectedFields, environment.FieldTags) + fieldSeen[environment.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return e + if !unknownSeen { + e.Select(selectedFields...) + } + return nil } -func (e *EnvironmentQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *EnvironmentQuery { - return e +type environmentPaginateArgs struct { + first, last *int + after, before *Cursor + opts []EnvironmentPaginateOption +} + +func newEnvironmentPaginateArgs(rv map[string]any) *environmentPaginateArgs { + args := &environmentPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (fd *FileDeleteQuery) CollectFields(ctx context.Context, satisfies ...string) *FileDeleteQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - fd = fd.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (fd *FileDeleteQuery) CollectFields(ctx context.Context, satisfies ...string) (*FileDeleteQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return fd, nil + } + if err := fd.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return fd, nil +} + +func (fd *FileDeleteQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(filedelete.Columns)) + selectedFields = []string{filedelete.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "filedeletetoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: fd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + fd.withFileDeleteToEnvironment = query + case "hclID": + if _, ok := fieldSeen[filedelete.FieldHCLID]; !ok { + selectedFields = append(selectedFields, filedelete.FieldHCLID) + fieldSeen[filedelete.FieldHCLID] = struct{}{} + } + case "path": + if _, ok := fieldSeen[filedelete.FieldPath]; !ok { + selectedFields = append(selectedFields, filedelete.FieldPath) + fieldSeen[filedelete.FieldPath] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[filedelete.FieldTags]; !ok { + selectedFields = append(selectedFields, filedelete.FieldTags) + fieldSeen[filedelete.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + fd.Select(selectedFields...) } - return fd + return nil } -func (fd *FileDeleteQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *FileDeleteQuery { - return fd +type filedeletePaginateArgs struct { + first, last *int + after, before *Cursor + opts []FileDeletePaginateOption +} + +func newFileDeletePaginateArgs(rv map[string]any) *filedeletePaginateArgs { + args := &filedeletePaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (fd *FileDownloadQuery) CollectFields(ctx context.Context, satisfies ...string) *FileDownloadQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - fd = fd.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (fd *FileDownloadQuery) CollectFields(ctx context.Context, satisfies ...string) (*FileDownloadQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return fd, nil } - return fd + if err := fd.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return fd, nil } -func (fd *FileDownloadQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *FileDownloadQuery { - return fd +func (fd *FileDownloadQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(filedownload.Columns)) + selectedFields = []string{filedownload.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "filedownloadtoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: fd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + fd.withFileDownloadToEnvironment = query + case "hclID": + if _, ok := fieldSeen[filedownload.FieldHCLID]; !ok { + selectedFields = append(selectedFields, filedownload.FieldHCLID) + fieldSeen[filedownload.FieldHCLID] = struct{}{} + } + case "sourceType": + if _, ok := fieldSeen[filedownload.FieldSourceType]; !ok { + selectedFields = append(selectedFields, filedownload.FieldSourceType) + fieldSeen[filedownload.FieldSourceType] = struct{}{} + } + case "source": + if _, ok := fieldSeen[filedownload.FieldSource]; !ok { + selectedFields = append(selectedFields, filedownload.FieldSource) + fieldSeen[filedownload.FieldSource] = struct{}{} + } + case "destination": + if _, ok := fieldSeen[filedownload.FieldDestination]; !ok { + selectedFields = append(selectedFields, filedownload.FieldDestination) + fieldSeen[filedownload.FieldDestination] = struct{}{} + } + case "template": + if _, ok := fieldSeen[filedownload.FieldTemplate]; !ok { + selectedFields = append(selectedFields, filedownload.FieldTemplate) + fieldSeen[filedownload.FieldTemplate] = struct{}{} + } + case "perms": + if _, ok := fieldSeen[filedownload.FieldPerms]; !ok { + selectedFields = append(selectedFields, filedownload.FieldPerms) + fieldSeen[filedownload.FieldPerms] = struct{}{} + } + case "disabled": + if _, ok := fieldSeen[filedownload.FieldDisabled]; !ok { + selectedFields = append(selectedFields, filedownload.FieldDisabled) + fieldSeen[filedownload.FieldDisabled] = struct{}{} + } + case "md5": + if _, ok := fieldSeen[filedownload.FieldMd5]; !ok { + selectedFields = append(selectedFields, filedownload.FieldMd5) + fieldSeen[filedownload.FieldMd5] = struct{}{} + } + case "absPath": + if _, ok := fieldSeen[filedownload.FieldAbsPath]; !ok { + selectedFields = append(selectedFields, filedownload.FieldAbsPath) + fieldSeen[filedownload.FieldAbsPath] = struct{}{} + } + case "isTxt": + if _, ok := fieldSeen[filedownload.FieldIsTxt]; !ok { + selectedFields = append(selectedFields, filedownload.FieldIsTxt) + fieldSeen[filedownload.FieldIsTxt] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[filedownload.FieldTags]; !ok { + selectedFields = append(selectedFields, filedownload.FieldTags) + fieldSeen[filedownload.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if 
!unknownSeen { + fd.Select(selectedFields...) + } + return nil +} + +type filedownloadPaginateArgs struct { + first, last *int + after, before *Cursor + opts []FileDownloadPaginateOption +} + +func newFileDownloadPaginateArgs(rv map[string]any) *filedownloadPaginateArgs { + args := &filedownloadPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (fe *FileExtractQuery) CollectFields(ctx context.Context, satisfies ...string) *FileExtractQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - fe = fe.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (fe *FileExtractQuery) CollectFields(ctx context.Context, satisfies ...string) (*FileExtractQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return fe, nil + } + if err := fe.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return fe + return fe, nil } -func (fe *FileExtractQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *FileExtractQuery { - return fe +func (fe *FileExtractQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(fileextract.Columns)) + selectedFields = []string{fileextract.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "fileextracttoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: fe.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + fe.withFileExtractToEnvironment = query + case "hclID": + if _, ok := fieldSeen[fileextract.FieldHCLID]; !ok { + selectedFields = append(selectedFields, fileextract.FieldHCLID) + fieldSeen[fileextract.FieldHCLID] = struct{}{} + } + case "source": + if _, ok := fieldSeen[fileextract.FieldSource]; !ok { + selectedFields = append(selectedFields, fileextract.FieldSource) + fieldSeen[fileextract.FieldSource] = struct{}{} + } + case "destination": + if _, ok := fieldSeen[fileextract.FieldDestination]; !ok { + selectedFields = append(selectedFields, fileextract.FieldDestination) + fieldSeen[fileextract.FieldDestination] = struct{}{} + } + case "type": + if _, ok := fieldSeen[fileextract.FieldType]; !ok { + selectedFields = append(selectedFields, fileextract.FieldType) + fieldSeen[fileextract.FieldType] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[fileextract.FieldTags]; !ok { + selectedFields = append(selectedFields, fileextract.FieldTags) + fieldSeen[fileextract.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + fe.Select(selectedFields...) 
+ } + return nil +} + +type fileextractPaginateArgs struct { + first, last *int + after, before *Cursor + opts []FileExtractPaginateOption +} + +func newFileExtractPaginateArgs(rv map[string]any) *fileextractPaginateArgs { + args := &fileextractPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (f *FindingQuery) CollectFields(ctx context.Context, satisfies ...string) *FindingQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - f = f.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (f *FindingQuery) CollectFields(ctx context.Context, satisfies ...string) (*FindingQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return f, nil + } + if err := f.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return f, nil +} + +func (f *FindingQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(finding.Columns)) + selectedFields = []string{finding.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "findingtouser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&UserClient{config: f.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + f.WithNamedFindingToUser(alias, func(wq *UserQuery) { + *wq = *query + }) + case "findingtohost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: f.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + f.withFindingToHost = query + case "findingtoscript": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ScriptClient{config: f.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + f.withFindingToScript = query + case "findingtoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: f.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + f.withFindingToEnvironment = query + case "name": + if _, ok := fieldSeen[finding.FieldName]; !ok { + selectedFields = append(selectedFields, finding.FieldName) + fieldSeen[finding.FieldName] = struct{}{} + } + case "description": + if _, ok := fieldSeen[finding.FieldDescription]; !ok { + selectedFields = append(selectedFields, finding.FieldDescription) + fieldSeen[finding.FieldDescription] = struct{}{} + } + case "severity": + if _, ok := fieldSeen[finding.FieldSeverity]; !ok { + selectedFields = append(selectedFields, finding.FieldSeverity) + fieldSeen[finding.FieldSeverity] = struct{}{} + } + case "difficulty": + if _, ok := fieldSeen[finding.FieldDifficulty]; !ok { + selectedFields = append(selectedFields, finding.FieldDifficulty) + 
fieldSeen[finding.FieldDifficulty] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[finding.FieldTags]; !ok { + selectedFields = append(selectedFields, finding.FieldTags) + fieldSeen[finding.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + f.Select(selectedFields...) } - return f + return nil } -func (f *FindingQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *FindingQuery { - return f +type findingPaginateArgs struct { + first, last *int + after, before *Cursor + opts []FindingPaginateOption +} + +func newFindingPaginateArgs(rv map[string]any) *findingPaginateArgs { + args := &findingPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (gfm *GinFileMiddlewareQuery) CollectFields(ctx context.Context, satisfies ...string) *GinFileMiddlewareQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - gfm = gfm.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (gfm *GinFileMiddlewareQuery) CollectFields(ctx context.Context, satisfies ...string) (*GinFileMiddlewareQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return gfm, nil } - return gfm + if err := gfm.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return gfm, nil +} + +func (gfm *GinFileMiddlewareQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(ginfilemiddleware.Columns)) + selectedFields = []string{ginfilemiddleware.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "ginfilemiddlewaretoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: gfm.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + gfm.withGinFileMiddlewareToProvisionedHost = query + case "ginfilemiddlewaretoprovisioningstep": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisioningStepClient{config: gfm.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + gfm.withGinFileMiddlewareToProvisioningStep = query + case "urlID": + if _, ok := fieldSeen[ginfilemiddleware.FieldURLID]; !ok { + selectedFields = append(selectedFields, ginfilemiddleware.FieldURLID) + fieldSeen[ginfilemiddleware.FieldURLID] = struct{}{} + } + case "filePath": + if _, ok := fieldSeen[ginfilemiddleware.FieldFilePath]; !ok { + selectedFields = append(selectedFields, ginfilemiddleware.FieldFilePath) + fieldSeen[ginfilemiddleware.FieldFilePath] = struct{}{} + } + case "accessed": + if _, ok := fieldSeen[ginfilemiddleware.FieldAccessed]; !ok { + selectedFields = append(selectedFields, ginfilemiddleware.FieldAccessed) + fieldSeen[ginfilemiddleware.FieldAccessed] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + gfm.Select(selectedFields...) + } + return nil +} + +type ginfilemiddlewarePaginateArgs struct { + first, last *int + after, before *Cursor + opts []GinFileMiddlewarePaginateOption } -func (gfm *GinFileMiddlewareQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *GinFileMiddlewareQuery { - return gfm +func newGinFileMiddlewarePaginateArgs(rv map[string]any) *ginfilemiddlewarePaginateArgs { + args := &ginfilemiddlewarePaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (h *HostQuery) CollectFields(ctx context.Context, satisfies ...string) *HostQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - h = h.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (h *HostQuery) CollectFields(ctx context.Context, satisfies ...string) (*HostQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return h, nil + } + if err := h.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return h, nil +} + +func (h *HostQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(host.Columns)) + selectedFields = []string{host.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "hosttodisk": + var ( + alias = field.Alias + path = append(path, alias) + query = (&DiskClient{config: h.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + h.withHostToDisk = query + case "hosttouser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&UserClient{config: h.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + h.WithNamedHostToUser(alias, func(wq *UserQuery) { + *wq = *query + }) + case "hosttoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: h.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + h.withHostToEnvironment = query + case "hosttoincludednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&IncludedNetworkClient{config: h.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + h.WithNamedHostToIncludedNetwork(alias, func(wq *IncludedNetworkQuery) { + *wq = *query + }) + case "dependonhosttohostdependency": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostDependencyClient{config: h.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + h.WithNamedDependOnHostToHostDependency(alias, func(wq *HostDependencyQuery) { + *wq = *query + }) + case "dependbyhosttohostdependency": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostDependencyClient{config: h.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + h.WithNamedDependByHostToHostDependency(alias, func(wq *HostDependencyQuery) { + *wq = *query + }) + case "hclID": + if _, ok := fieldSeen[host.FieldHCLID]; !ok { + selectedFields = append(selectedFields, host.FieldHCLID) + fieldSeen[host.FieldHCLID] = struct{}{} + } + case "hostname": + if _, ok := fieldSeen[host.FieldHostname]; !ok { + selectedFields = append(selectedFields, host.FieldHostname) + fieldSeen[host.FieldHostname] = struct{}{} + } + case "description": + if _, ok := fieldSeen[host.FieldDescription]; !ok { + selectedFields = append(selectedFields, host.FieldDescription) + fieldSeen[host.FieldDescription] = struct{}{} + } + case "os": + if _, ok := fieldSeen[host.FieldOS]; !ok { + selectedFields = append(selectedFields, host.FieldOS) + fieldSeen[host.FieldOS] = struct{}{} + } + case "lastOctet": + if _, ok := fieldSeen[host.FieldLastOctet]; !ok { + selectedFields = append(selectedFields, host.FieldLastOctet) + fieldSeen[host.FieldLastOctet] = struct{}{} + } + case "instanceSize": + if _, ok := fieldSeen[host.FieldInstanceSize]; !ok { + selectedFields = append(selectedFields, host.FieldInstanceSize) + fieldSeen[host.FieldInstanceSize] = struct{}{} + } + case "allowMACChanges": + if _, ok := fieldSeen[host.FieldAllowMACChanges]; !ok { + selectedFields = append(selectedFields, host.FieldAllowMACChanges) + fieldSeen[host.FieldAllowMACChanges] = struct{}{} + } + case "exposedTCPPorts": + if _, ok := fieldSeen[host.FieldExposedTCPPorts]; !ok { + 
selectedFields = append(selectedFields, host.FieldExposedTCPPorts) + fieldSeen[host.FieldExposedTCPPorts] = struct{}{} + } + case "exposedUDPPorts": + if _, ok := fieldSeen[host.FieldExposedUDPPorts]; !ok { + selectedFields = append(selectedFields, host.FieldExposedUDPPorts) + fieldSeen[host.FieldExposedUDPPorts] = struct{}{} + } + case "overridePassword": + if _, ok := fieldSeen[host.FieldOverridePassword]; !ok { + selectedFields = append(selectedFields, host.FieldOverridePassword) + fieldSeen[host.FieldOverridePassword] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[host.FieldVars]; !ok { + selectedFields = append(selectedFields, host.FieldVars) + fieldSeen[host.FieldVars] = struct{}{} + } + case "userGroups": + if _, ok := fieldSeen[host.FieldUserGroups]; !ok { + selectedFields = append(selectedFields, host.FieldUserGroups) + fieldSeen[host.FieldUserGroups] = struct{}{} + } + case "provisionSteps": + if _, ok := fieldSeen[host.FieldProvisionSteps]; !ok { + selectedFields = append(selectedFields, host.FieldProvisionSteps) + fieldSeen[host.FieldProvisionSteps] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[host.FieldTags]; !ok { + selectedFields = append(selectedFields, host.FieldTags) + fieldSeen[host.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return h + if !unknownSeen { + h.Select(selectedFields...) + } + return nil } -func (h *HostQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *HostQuery { - return h +type hostPaginateArgs struct { + first, last *int + after, before *Cursor + opts []HostPaginateOption +} + +func newHostPaginateArgs(rv map[string]any) *hostPaginateArgs { + args := &hostPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (hd *HostDependencyQuery) CollectFields(ctx context.Context, satisfies ...string) *HostDependencyQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - hd = hd.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (hd *HostDependencyQuery) CollectFields(ctx context.Context, satisfies ...string) (*HostDependencyQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return hd, nil + } + if err := hd.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return hd + return hd, nil } -func (hd *HostDependencyQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *HostDependencyQuery { - return hd +func (hd *HostDependencyQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(hostdependency.Columns)) + selectedFields = []string{hostdependency.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "hostdependencytodependonhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: hd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + hd.withHostDependencyToDependOnHost = query + case "hostdependencytodependbyhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: hd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + hd.withHostDependencyToDependByHost = query + case "hostdependencytonetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&NetworkClient{config: hd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + hd.withHostDependencyToNetwork = query + case "hostdependencytoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: hd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + hd.withHostDependencyToEnvironment = query + case "hostID": + if _, ok := fieldSeen[hostdependency.FieldHostID]; !ok { + selectedFields = append(selectedFields, hostdependency.FieldHostID) + fieldSeen[hostdependency.FieldHostID] = struct{}{} + } + case "networkID": + if _, ok := fieldSeen[hostdependency.FieldNetworkID]; !ok { + selectedFields = append(selectedFields, hostdependency.FieldNetworkID) + fieldSeen[hostdependency.FieldNetworkID] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + hd.Select(selectedFields...) + } + return nil +} + +type hostdependencyPaginateArgs struct { + first, last *int + after, before *Cursor + opts []HostDependencyPaginateOption +} + +func newHostDependencyPaginateArgs(rv map[string]any) *hostdependencyPaginateArgs { + args := &hostdependencyPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (i *IdentityQuery) CollectFields(ctx context.Context, satisfies ...string) *IdentityQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - i = i.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (i *IdentityQuery) CollectFields(ctx context.Context, satisfies ...string) (*IdentityQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return i, nil } - return i + if err := i.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return i, nil } -func (i *IdentityQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *IdentityQuery { - return i +func (i *IdentityQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(identity.Columns)) + selectedFields = []string{identity.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "identitytoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: i.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + i.withIdentityToEnvironment = query + case "hclID": + if _, ok := fieldSeen[identity.FieldHCLID]; !ok { + selectedFields = append(selectedFields, identity.FieldHCLID) + fieldSeen[identity.FieldHCLID] = struct{}{} + } + case "firstName": + if _, ok := fieldSeen[identity.FieldFirstName]; !ok { + selectedFields = append(selectedFields, identity.FieldFirstName) + fieldSeen[identity.FieldFirstName] = struct{}{} + } + case "lastName": + if _, ok := fieldSeen[identity.FieldLastName]; !ok { + selectedFields = append(selectedFields, identity.FieldLastName) + fieldSeen[identity.FieldLastName] = struct{}{} + } + case "email": + if _, ok := fieldSeen[identity.FieldEmail]; !ok { + selectedFields = append(selectedFields, identity.FieldEmail) + fieldSeen[identity.FieldEmail] = struct{}{} + } + case "password": + if _, ok := fieldSeen[identity.FieldPassword]; !ok { + selectedFields = append(selectedFields, identity.FieldPassword) + fieldSeen[identity.FieldPassword] = struct{}{} + } + case "description": + if _, ok := fieldSeen[identity.FieldDescription]; !ok { + selectedFields = append(selectedFields, identity.FieldDescription) + fieldSeen[identity.FieldDescription] = struct{}{} + } + case "avatarFile": + if _, ok := fieldSeen[identity.FieldAvatarFile]; !ok { + selectedFields = append(selectedFields, identity.FieldAvatarFile) + fieldSeen[identity.FieldAvatarFile] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[identity.FieldVars]; !ok { + selectedFields = append(selectedFields, identity.FieldVars) + fieldSeen[identity.FieldVars] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[identity.FieldTags]; !ok { + selectedFields = append(selectedFields, identity.FieldTags) + fieldSeen[identity.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + i.Select(selectedFields...) 
+ } + return nil +} + +type identityPaginateArgs struct { + first, last *int + after, before *Cursor + opts []IdentityPaginateOption +} + +func newIdentityPaginateArgs(rv map[string]any) *identityPaginateArgs { + args := &identityPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (in *IncludedNetworkQuery) CollectFields(ctx context.Context, satisfies ...string) *IncludedNetworkQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - in = in.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (in *IncludedNetworkQuery) CollectFields(ctx context.Context, satisfies ...string) (*IncludedNetworkQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return in, nil + } + if err := in.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return in, nil +} + +func (in *IncludedNetworkQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(includednetwork.Columns)) + selectedFields = []string{includednetwork.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "includednetworktotag": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TagClient{config: in.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + in.WithNamedIncludedNetworkToTag(alias, func(wq *TagQuery) { + *wq = *query + }) + case "includednetworktohost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: in.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + in.WithNamedIncludedNetworkToHost(alias, func(wq *HostQuery) { + *wq = *query + }) + case "includednetworktonetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&NetworkClient{config: in.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + in.withIncludedNetworkToNetwork = query + case "includednetworktoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: in.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + in.WithNamedIncludedNetworkToEnvironment(alias, func(wq *EnvironmentQuery) { + *wq = *query + }) + case "name": + if _, ok := fieldSeen[includednetwork.FieldName]; !ok { + selectedFields = append(selectedFields, includednetwork.FieldName) + fieldSeen[includednetwork.FieldName] = struct{}{} + } + case "hosts": + if _, ok := fieldSeen[includednetwork.FieldHosts]; !ok { + selectedFields = append(selectedFields, includednetwork.FieldHosts) + fieldSeen[includednetwork.FieldHosts] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return in + if !unknownSeen { 
+ in.Select(selectedFields...) + } + return nil } -func (in *IncludedNetworkQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *IncludedNetworkQuery { - return in +type includednetworkPaginateArgs struct { + first, last *int + after, before *Cursor + opts []IncludedNetworkPaginateOption +} + +func newIncludedNetworkPaginateArgs(rv map[string]any) *includednetworkPaginateArgs { + args := &includednetworkPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (n *NetworkQuery) CollectFields(ctx context.Context, satisfies ...string) *NetworkQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - n = n.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (n *NetworkQuery) CollectFields(ctx context.Context, satisfies ...string) (*NetworkQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return n, nil + } + if err := n.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return n, nil +} + +func (n *NetworkQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(network.Columns)) + selectedFields = []string{network.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "networktoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: n.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + n.withNetworkToEnvironment = query + case "networktohostdependency": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostDependencyClient{config: n.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + n.WithNamedNetworkToHostDependency(alias, func(wq *HostDependencyQuery) { + *wq = *query + }) + case "networktoincludednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&IncludedNetworkClient{config: n.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + n.WithNamedNetworkToIncludedNetwork(alias, func(wq *IncludedNetworkQuery) { + *wq = *query + }) + case "hclID": + if _, ok := fieldSeen[network.FieldHCLID]; !ok { + selectedFields = append(selectedFields, network.FieldHCLID) + fieldSeen[network.FieldHCLID] = struct{}{} + } + case "name": + if _, ok := fieldSeen[network.FieldName]; !ok { + selectedFields = append(selectedFields, network.FieldName) + fieldSeen[network.FieldName] = struct{}{} + } + case "cidr": + if _, ok := fieldSeen[network.FieldCidr]; !ok { + selectedFields = append(selectedFields, network.FieldCidr) + fieldSeen[network.FieldCidr] = struct{}{} + } + case "vdiVisible": + if _, ok := fieldSeen[network.FieldVdiVisible]; !ok { + selectedFields = append(selectedFields, 
network.FieldVdiVisible) + fieldSeen[network.FieldVdiVisible] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[network.FieldVars]; !ok { + selectedFields = append(selectedFields, network.FieldVars) + fieldSeen[network.FieldVars] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[network.FieldTags]; !ok { + selectedFields = append(selectedFields, network.FieldTags) + fieldSeen[network.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + n.Select(selectedFields...) } - return n + return nil } -func (n *NetworkQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *NetworkQuery { - return n +type networkPaginateArgs struct { + first, last *int + after, before *Cursor + opts []NetworkPaginateOption +} + +func newNetworkPaginateArgs(rv map[string]any) *networkPaginateArgs { + args := &networkPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (pl *PlanQuery) CollectFields(ctx context.Context, satisfies ...string) *PlanQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - pl = pl.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (pl *PlanQuery) CollectFields(ctx context.Context, satisfies ...string) (*PlanQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return pl, nil } - return pl + if err := pl.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return pl, nil +} + +func (pl *PlanQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(plan.Columns)) + selectedFields = []string{plan.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "prevplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.WithNamedPrevPlan(alias, func(wq *PlanQuery) { + *wq = *query + }) + case "nextplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.WithNamedNextPlan(alias, func(wq *PlanQuery) { + *wq = *query + }) + case "plantobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.withPlanToBuild = query + case "plantoteam": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TeamClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.withPlanToTeam = query + case "plantoprovisionednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedNetworkClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.withPlanToProvisionedNetwork = query + case "plantoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.withPlanToProvisionedHost = query + case "plantoprovisioningstep": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisioningStepClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.withPlanToProvisioningStep = query + case "plantostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.withPlanToStatus = query + case "plantoplandiffs": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanDiffClient{config: pl.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pl.WithNamedPlanToPlanDiffs(alias, func(wq *PlanDiffQuery) { + *wq = *query + }) + case "stepNumber": + if _, ok := fieldSeen[plan.FieldStepNumber]; !ok { + selectedFields = append(selectedFields, plan.FieldStepNumber) + fieldSeen[plan.FieldStepNumber] = struct{}{} + } + case "type": + if _, ok := fieldSeen[plan.FieldType]; !ok { + selectedFields = append(selectedFields, plan.FieldType) + fieldSeen[plan.FieldType] = struct{}{} + } + case "buildID": + if _, ok := fieldSeen[plan.FieldBuildID]; !ok { + selectedFields = append(selectedFields, plan.FieldBuildID) + fieldSeen[plan.FieldBuildID] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + pl.Select(selectedFields...) 
+ } + return nil +} + +type planPaginateArgs struct { + first, last *int + after, before *Cursor + opts []PlanPaginateOption } -func (pl *PlanQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *PlanQuery { - return pl +func newPlanPaginateArgs(rv map[string]any) *planPaginateArgs { + args := &planPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (pd *PlanDiffQuery) CollectFields(ctx context.Context, satisfies ...string) *PlanDiffQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - pd = pd.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (pd *PlanDiffQuery) CollectFields(ctx context.Context, satisfies ...string) (*PlanDiffQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return pd, nil + } + if err := pd.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return pd + return pd, nil } -func (pd *PlanDiffQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *PlanDiffQuery { - return pd +func (pd *PlanDiffQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(plandiff.Columns)) + selectedFields = []string{plandiff.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "plandifftobuildcommit": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildCommitClient{config: pd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pd.withPlanDiffToBuildCommit = query + case "plandifftoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: pd.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pd.withPlanDiffToPlan = query + case "revision": + if _, ok := fieldSeen[plandiff.FieldRevision]; !ok { + selectedFields = append(selectedFields, plandiff.FieldRevision) + fieldSeen[plandiff.FieldRevision] = struct{}{} + } + case "newState": + if _, ok := fieldSeen[plandiff.FieldNewState]; !ok { + selectedFields = append(selectedFields, plandiff.FieldNewState) + fieldSeen[plandiff.FieldNewState] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + pd.Select(selectedFields...) 
+ } + return nil +} + +type plandiffPaginateArgs struct { + first, last *int + after, before *Cursor + opts []PlanDiffPaginateOption +} + +func newPlanDiffPaginateArgs(rv map[string]any) *plandiffPaginateArgs { + args := &plandiffPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (ph *ProvisionedHostQuery) CollectFields(ctx context.Context, satisfies ...string) *ProvisionedHostQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - ph = ph.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (ph *ProvisionedHostQuery) CollectFields(ctx context.Context, satisfies ...string) (*ProvisionedHostQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return ph, nil + } + if err := ph.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return ph, nil +} + +func (ph *ProvisionedHostQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(provisionedhost.Columns)) + selectedFields = []string{provisionedhost.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "provisionedhosttostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToStatus = query + case "provisionedhosttoprovisionednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedNetworkClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToProvisionedNetwork = query + case "provisionedhosttohost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&HostClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToHost = query + case "provisionedhosttoendstepplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToEndStepPlan = query + case "provisionedhosttobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToBuild = query + case "provisionedhosttoprovisioningstep": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisioningStepClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + 
ph.WithNamedProvisionedHostToProvisioningStep(alias, func(wq *ProvisioningStepQuery) { + *wq = *query + }) + case "provisionedhosttoagentstatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AgentStatusClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.WithNamedProvisionedHostToAgentStatus(alias, func(wq *AgentStatusQuery) { + *wq = *query + }) + case "provisionedhosttoagenttask": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AgentTaskClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.WithNamedProvisionedHostToAgentTask(alias, func(wq *AgentTaskQuery) { + *wq = *query + }) + case "provisionedhosttoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToPlan = query + case "provisionedhosttoginfilemiddleware": + var ( + alias = field.Alias + path = append(path, alias) + query = (&GinFileMiddlewareClient{config: ph.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ph.withProvisionedHostToGinFileMiddleware = query + case "subnetIP": + if _, ok := fieldSeen[provisionedhost.FieldSubnetIP]; !ok { + selectedFields = append(selectedFields, provisionedhost.FieldSubnetIP) + fieldSeen[provisionedhost.FieldSubnetIP] = struct{}{} + } + case "addonType": + if _, ok := fieldSeen[provisionedhost.FieldAddonType]; !ok { + selectedFields = append(selectedFields, provisionedhost.FieldAddonType) + fieldSeen[provisionedhost.FieldAddonType] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[provisionedhost.FieldVars]; !ok { + selectedFields = append(selectedFields, provisionedhost.FieldVars) + fieldSeen[provisionedhost.FieldVars] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + ph.Select(selectedFields...) } - return ph + return nil } -func (ph *ProvisionedHostQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *ProvisionedHostQuery { - return ph +type provisionedhostPaginateArgs struct { + first, last *int + after, before *Cursor + opts []ProvisionedHostPaginateOption +} + +func newProvisionedHostPaginateArgs(rv map[string]any) *provisionedhostPaginateArgs { + args := &provisionedhostPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (pn *ProvisionedNetworkQuery) CollectFields(ctx context.Context, satisfies ...string) *ProvisionedNetworkQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - pn = pn.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) 
+func (pn *ProvisionedNetworkQuery) CollectFields(ctx context.Context, satisfies ...string) (*ProvisionedNetworkQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return pn, nil } - return pn + if err := pn.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return pn, nil } -func (pn *ProvisionedNetworkQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *ProvisionedNetworkQuery { - return pn +func (pn *ProvisionedNetworkQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(provisionednetwork.Columns)) + selectedFields = []string{provisionednetwork.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "provisionednetworktostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: pn.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pn.withProvisionedNetworkToStatus = query + case "provisionednetworktonetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&NetworkClient{config: pn.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pn.withProvisionedNetworkToNetwork = query + case "provisionednetworktobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: pn.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pn.withProvisionedNetworkToBuild = query + case "provisionednetworktoteam": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TeamClient{config: pn.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pn.withProvisionedNetworkToTeam = query + case "provisionednetworktoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: pn.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pn.WithNamedProvisionedNetworkToProvisionedHost(alias, func(wq *ProvisionedHostQuery) { + *wq = *query + }) + case "provisionednetworktoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: pn.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + pn.withProvisionedNetworkToPlan = query + case "name": + if _, ok := fieldSeen[provisionednetwork.FieldName]; !ok { + selectedFields = append(selectedFields, provisionednetwork.FieldName) + fieldSeen[provisionednetwork.FieldName] = struct{}{} + } + case "cidr": + if _, ok := fieldSeen[provisionednetwork.FieldCidr]; !ok { + selectedFields = append(selectedFields, provisionednetwork.FieldCidr) + fieldSeen[provisionednetwork.FieldCidr] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[provisionednetwork.FieldVars]; !ok { + selectedFields = append(selectedFields, provisionednetwork.FieldVars) + fieldSeen[provisionednetwork.FieldVars] = struct{}{} + } + case "id": + case "__typename": + default: + 
unknownSeen = true + } + } + if !unknownSeen { + pn.Select(selectedFields...) + } + return nil +} + +type provisionednetworkPaginateArgs struct { + first, last *int + after, before *Cursor + opts []ProvisionedNetworkPaginateOption +} + +func newProvisionedNetworkPaginateArgs(rv map[string]any) *provisionednetworkPaginateArgs { + args := &provisionednetworkPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (ps *ProvisioningStepQuery) CollectFields(ctx context.Context, satisfies ...string) *ProvisioningStepQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - ps = ps.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (ps *ProvisioningStepQuery) CollectFields(ctx context.Context, satisfies ...string) (*ProvisioningStepQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return ps, nil + } + if err := ps.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return ps, nil +} + +func (ps *ProvisioningStepQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(provisioningstep.Columns)) + selectedFields = []string{provisioningstep.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "provisioningsteptostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToStatus = query + case "provisioningsteptoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToProvisionedHost = query + case "provisioningsteptoscript": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ScriptClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToScript = query + case "provisioningsteptocommand": + var ( + alias = field.Alias + path = append(path, alias) + query = (&CommandClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToCommand = query + case "provisioningsteptodnsrecord": + var ( + alias = field.Alias + path = append(path, alias) + query = (&DNSRecordClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToDNSRecord = query + case "provisioningsteptofiledelete": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FileDeleteClient{config: ps.config}).Query() + ) + if err := 
query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToFileDelete = query + case "provisioningsteptofiledownload": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FileDownloadClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToFileDownload = query + case "provisioningsteptofileextract": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FileExtractClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToFileExtract = query + case "provisioningsteptoansible": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AnsibleClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToAnsible = query + case "provisioningsteptoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToPlan = query + case "provisioningsteptoagenttask": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AgentTaskClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.WithNamedProvisioningStepToAgentTask(alias, func(wq *AgentTaskQuery) { + *wq = *query + }) + case "provisioningsteptoginfilemiddleware": + var ( + alias = field.Alias + path = append(path, alias) + query = (&GinFileMiddlewareClient{config: ps.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + ps.withProvisioningStepToGinFileMiddleware = query + case "type": + if _, ok := fieldSeen[provisioningstep.FieldType]; !ok { + selectedFields = append(selectedFields, provisioningstep.FieldType) + fieldSeen[provisioningstep.FieldType] = struct{}{} + } + case "stepNumber": + if _, ok := fieldSeen[provisioningstep.FieldStepNumber]; !ok { + selectedFields = append(selectedFields, provisioningstep.FieldStepNumber) + fieldSeen[provisioningstep.FieldStepNumber] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return ps + if !unknownSeen { + ps.Select(selectedFields...) + } + return nil } -func (ps *ProvisioningStepQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *ProvisioningStepQuery { - return ps +type provisioningstepPaginateArgs struct { + first, last *int + after, before *Cursor + opts []ProvisioningStepPaginateOption +} + +func newProvisioningStepPaginateArgs(rv map[string]any) *provisioningstepPaginateArgs { + args := &provisioningstepPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. 
-func (rc *RepoCommitQuery) CollectFields(ctx context.Context, satisfies ...string) *RepoCommitQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - rc = rc.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (rc *RepoCommitQuery) CollectFields(ctx context.Context, satisfies ...string) (*RepoCommitQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return rc, nil + } + if err := rc.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return rc, nil +} + +func (rc *RepoCommitQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(repocommit.Columns)) + selectedFields = []string{repocommit.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "repocommittorepository": + var ( + alias = field.Alias + path = append(path, alias) + query = (&RepositoryClient{config: rc.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + rc.withRepoCommitToRepository = query + case "revision": + if _, ok := fieldSeen[repocommit.FieldRevision]; !ok { + selectedFields = append(selectedFields, repocommit.FieldRevision) + fieldSeen[repocommit.FieldRevision] = struct{}{} + } + case "hash": + if _, ok := fieldSeen[repocommit.FieldHash]; !ok { + selectedFields = append(selectedFields, repocommit.FieldHash) + fieldSeen[repocommit.FieldHash] = struct{}{} + } + case "author": + if _, ok := fieldSeen[repocommit.FieldAuthor]; !ok { + selectedFields = append(selectedFields, repocommit.FieldAuthor) + fieldSeen[repocommit.FieldAuthor] = struct{}{} + } + case "committer": + if _, ok := fieldSeen[repocommit.FieldCommitter]; !ok { + selectedFields = append(selectedFields, repocommit.FieldCommitter) + fieldSeen[repocommit.FieldCommitter] = struct{}{} + } + case "pgpSignature": + if _, ok := fieldSeen[repocommit.FieldPgpSignature]; !ok { + selectedFields = append(selectedFields, repocommit.FieldPgpSignature) + fieldSeen[repocommit.FieldPgpSignature] = struct{}{} + } + case "message": + if _, ok := fieldSeen[repocommit.FieldMessage]; !ok { + selectedFields = append(selectedFields, repocommit.FieldMessage) + fieldSeen[repocommit.FieldMessage] = struct{}{} + } + case "treeHash": + if _, ok := fieldSeen[repocommit.FieldTreeHash]; !ok { + selectedFields = append(selectedFields, repocommit.FieldTreeHash) + fieldSeen[repocommit.FieldTreeHash] = struct{}{} + } + case "parentHashes": + if _, ok := fieldSeen[repocommit.FieldParentHashes]; !ok { + selectedFields = append(selectedFields, repocommit.FieldParentHashes) + fieldSeen[repocommit.FieldParentHashes] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return rc + if !unknownSeen { + rc.Select(selectedFields...) 
+ } + return nil +} + +type repocommitPaginateArgs struct { + first, last *int + after, before *Cursor + opts []RepoCommitPaginateOption } -func (rc *RepoCommitQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *RepoCommitQuery { - return rc +func newRepoCommitPaginateArgs(rv map[string]any) *repocommitPaginateArgs { + args := &repocommitPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (r *RepositoryQuery) CollectFields(ctx context.Context, satisfies ...string) *RepositoryQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - r = r.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (r *RepositoryQuery) CollectFields(ctx context.Context, satisfies ...string) (*RepositoryQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return r, nil + } + if err := r.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return r, nil +} + +func (r *RepositoryQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(repository.Columns)) + selectedFields = []string{repository.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "repositorytoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: r.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + r.WithNamedRepositoryToEnvironment(alias, func(wq *EnvironmentQuery) { + *wq = *query + }) + case "repositorytorepocommit": + var ( + alias = field.Alias + path = append(path, alias) + query = (&RepoCommitClient{config: r.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + r.WithNamedRepositoryToRepoCommit(alias, func(wq *RepoCommitQuery) { + *wq = *query + }) + case "repoURL": + if _, ok := fieldSeen[repository.FieldRepoURL]; !ok { + selectedFields = append(selectedFields, repository.FieldRepoURL) + fieldSeen[repository.FieldRepoURL] = struct{}{} + } + case "branchName": + if _, ok := fieldSeen[repository.FieldBranchName]; !ok { + selectedFields = append(selectedFields, repository.FieldBranchName) + fieldSeen[repository.FieldBranchName] = struct{}{} + } + case "enviromentFilepath": + if _, ok := fieldSeen[repository.FieldEnviromentFilepath]; !ok { + selectedFields = append(selectedFields, repository.FieldEnviromentFilepath) + fieldSeen[repository.FieldEnviromentFilepath] = struct{}{} + } + case "folderPath": + if _, ok := fieldSeen[repository.FieldFolderPath]; !ok { + selectedFields = append(selectedFields, repository.FieldFolderPath) + fieldSeen[repository.FieldFolderPath] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + r.Select(selectedFields...) 
} - return r + return nil } -func (r *RepositoryQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *RepositoryQuery { - return r +type repositoryPaginateArgs struct { + first, last *int + after, before *Cursor + opts []RepositoryPaginateOption +} + +func newRepositoryPaginateArgs(rv map[string]any) *repositoryPaginateArgs { + args := &repositoryPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (s *ScriptQuery) CollectFields(ctx context.Context, satisfies ...string) *ScriptQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - s = s.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (s *ScriptQuery) CollectFields(ctx context.Context, satisfies ...string) (*ScriptQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return s, nil + } + if err := s.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return s + return s, nil } -func (s *ScriptQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *ScriptQuery { - return s +func (s *ScriptQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(script.Columns)) + selectedFields = []string{script.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "scripttouser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&UserClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.WithNamedScriptToUser(alias, func(wq *UserQuery) { + *wq = *query + }) + case "scripttofinding": + var ( + alias = field.Alias + path = append(path, alias) + query = (&FindingClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.WithNamedScriptToFinding(alias, func(wq *FindingQuery) { + *wq = *query + }) + case "scripttoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withScriptToEnvironment = query + case "hclID": + if _, ok := fieldSeen[script.FieldHCLID]; !ok { + selectedFields = append(selectedFields, script.FieldHCLID) + fieldSeen[script.FieldHCLID] = struct{}{} + } + case "name": + if _, ok := fieldSeen[script.FieldName]; !ok { + selectedFields = append(selectedFields, script.FieldName) + fieldSeen[script.FieldName] = struct{}{} + } + case "language": + if _, ok := fieldSeen[script.FieldLanguage]; !ok { + selectedFields = append(selectedFields, script.FieldLanguage) + fieldSeen[script.FieldLanguage] = struct{}{} + } + case "description": + if _, ok := fieldSeen[script.FieldDescription]; !ok { + selectedFields = 
append(selectedFields, script.FieldDescription) + fieldSeen[script.FieldDescription] = struct{}{} + } + case "source": + if _, ok := fieldSeen[script.FieldSource]; !ok { + selectedFields = append(selectedFields, script.FieldSource) + fieldSeen[script.FieldSource] = struct{}{} + } + case "sourceType": + if _, ok := fieldSeen[script.FieldSourceType]; !ok { + selectedFields = append(selectedFields, script.FieldSourceType) + fieldSeen[script.FieldSourceType] = struct{}{} + } + case "cooldown": + if _, ok := fieldSeen[script.FieldCooldown]; !ok { + selectedFields = append(selectedFields, script.FieldCooldown) + fieldSeen[script.FieldCooldown] = struct{}{} + } + case "timeout": + if _, ok := fieldSeen[script.FieldTimeout]; !ok { + selectedFields = append(selectedFields, script.FieldTimeout) + fieldSeen[script.FieldTimeout] = struct{}{} + } + case "ignoreErrors": + if _, ok := fieldSeen[script.FieldIgnoreErrors]; !ok { + selectedFields = append(selectedFields, script.FieldIgnoreErrors) + fieldSeen[script.FieldIgnoreErrors] = struct{}{} + } + case "args": + if _, ok := fieldSeen[script.FieldArgs]; !ok { + selectedFields = append(selectedFields, script.FieldArgs) + fieldSeen[script.FieldArgs] = struct{}{} + } + case "disabled": + if _, ok := fieldSeen[script.FieldDisabled]; !ok { + selectedFields = append(selectedFields, script.FieldDisabled) + fieldSeen[script.FieldDisabled] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[script.FieldVars]; !ok { + selectedFields = append(selectedFields, script.FieldVars) + fieldSeen[script.FieldVars] = struct{}{} + } + case "absPath": + if _, ok := fieldSeen[script.FieldAbsPath]; !ok { + selectedFields = append(selectedFields, script.FieldAbsPath) + fieldSeen[script.FieldAbsPath] = struct{}{} + } + case "tags": + if _, ok := fieldSeen[script.FieldTags]; !ok { + selectedFields = append(selectedFields, script.FieldTags) + fieldSeen[script.FieldTags] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + s.Select(selectedFields...) + } + return nil +} + +type scriptPaginateArgs struct { + first, last *int + after, before *Cursor + opts []ScriptPaginateOption +} + +func newScriptPaginateArgs(rv map[string]any) *scriptPaginateArgs { + args := &scriptPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (st *ServerTaskQuery) CollectFields(ctx context.Context, satisfies ...string) *ServerTaskQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - st = st.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (st *ServerTaskQuery) CollectFields(ctx context.Context, satisfies ...string) (*ServerTaskQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return st, nil + } + if err := st.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return st, nil +} + +func (st *ServerTaskQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(servertask.Columns)) + selectedFields = []string{servertask.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "servertasktoauthuser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AuthUserClient{config: st.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + st.withServerTaskToAuthUser = query + case "servertasktostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: st.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + st.withServerTaskToStatus = query + case "servertasktoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: st.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + st.withServerTaskToEnvironment = query + case "servertasktobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: st.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + st.withServerTaskToBuild = query + case "servertasktobuildcommit": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildCommitClient{config: st.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + st.withServerTaskToBuildCommit = query + case "servertasktoginfilemiddleware": + var ( + alias = field.Alias + path = append(path, alias) + query = (&GinFileMiddlewareClient{config: st.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + st.WithNamedServerTaskToGinFileMiddleware(alias, func(wq *GinFileMiddlewareQuery) { + *wq = *query + }) + case "type": + if _, ok := fieldSeen[servertask.FieldType]; !ok { + selectedFields = append(selectedFields, servertask.FieldType) + fieldSeen[servertask.FieldType] = struct{}{} + } + case "startTime": + if _, ok := fieldSeen[servertask.FieldStartTime]; !ok { + selectedFields = append(selectedFields, servertask.FieldStartTime) + fieldSeen[servertask.FieldStartTime] = struct{}{} + } + case "endTime": + if _, ok := fieldSeen[servertask.FieldEndTime]; !ok { + selectedFields = append(selectedFields, servertask.FieldEndTime) + fieldSeen[servertask.FieldEndTime] = struct{}{} + } + case "errors": + if _, ok := fieldSeen[servertask.FieldErrors]; !ok { + selectedFields = append(selectedFields, servertask.FieldErrors) + fieldSeen[servertask.FieldErrors] = struct{}{} + } + case "logFilePath": + if _, ok := fieldSeen[servertask.FieldLogFilePath]; !ok { + selectedFields = append(selectedFields, servertask.FieldLogFilePath) + fieldSeen[servertask.FieldLogFilePath] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return st + if !unknownSeen { + st.Select(selectedFields...) 
+ } + return nil +} + +type servertaskPaginateArgs struct { + first, last *int + after, before *Cursor + opts []ServerTaskPaginateOption } -func (st *ServerTaskQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *ServerTaskQuery { - return st +func newServerTaskPaginateArgs(rv map[string]any) *servertaskPaginateArgs { + args := &servertaskPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (s *StatusQuery) CollectFields(ctx context.Context, satisfies ...string) *StatusQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - s = s.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (s *StatusQuery) CollectFields(ctx context.Context, satisfies ...string) (*StatusQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return s, nil } - return s + if err := s.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return s, nil } -func (s *StatusQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *StatusQuery { - return s +func (s *StatusQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) + var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(status.Columns)) + selectedFields = []string{status.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "statustobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToBuild = query + case "statustoprovisionednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedNetworkClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToProvisionedNetwork = query + case "statustoprovisionedhost": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedHostClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToProvisionedHost = query + case "statustoprovisioningstep": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisioningStepClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToProvisioningStep = query + case "statustoteam": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TeamClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToTeam = query + case "statustoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: s.config}).Query() + 
) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToPlan = query + case "statustoservertask": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ServerTaskClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToServerTask = query + case "statustoadhocplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AdhocPlanClient{config: s.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + s.withStatusToAdhocPlan = query + case "state": + if _, ok := fieldSeen[status.FieldState]; !ok { + selectedFields = append(selectedFields, status.FieldState) + fieldSeen[status.FieldState] = struct{}{} + } + case "statusFor": + if _, ok := fieldSeen[status.FieldStatusFor]; !ok { + selectedFields = append(selectedFields, status.FieldStatusFor) + fieldSeen[status.FieldStatusFor] = struct{}{} + } + case "startedAt": + if _, ok := fieldSeen[status.FieldStartedAt]; !ok { + selectedFields = append(selectedFields, status.FieldStartedAt) + fieldSeen[status.FieldStartedAt] = struct{}{} + } + case "endedAt": + if _, ok := fieldSeen[status.FieldEndedAt]; !ok { + selectedFields = append(selectedFields, status.FieldEndedAt) + fieldSeen[status.FieldEndedAt] = struct{}{} + } + case "failed": + if _, ok := fieldSeen[status.FieldFailed]; !ok { + selectedFields = append(selectedFields, status.FieldFailed) + fieldSeen[status.FieldFailed] = struct{}{} + } + case "completed": + if _, ok := fieldSeen[status.FieldCompleted]; !ok { + selectedFields = append(selectedFields, status.FieldCompleted) + fieldSeen[status.FieldCompleted] = struct{}{} + } + case "error": + if _, ok := fieldSeen[status.FieldError]; !ok { + selectedFields = append(selectedFields, status.FieldError) + fieldSeen[status.FieldError] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + s.Select(selectedFields...) + } + return nil +} + +type statusPaginateArgs struct { + first, last *int + after, before *Cursor + opts []StatusPaginateOption +} + +func newStatusPaginateArgs(rv map[string]any) *statusPaginateArgs { + args := &statusPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (t *TagQuery) CollectFields(ctx context.Context, satisfies ...string) *TagQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - t = t.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (t *TagQuery) CollectFields(ctx context.Context, satisfies ...string) (*TagQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return t, nil + } + if err := t.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return t, nil +} + +func (t *TagQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(tag.Columns)) + selectedFields = []string{tag.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "uuid": + if _, ok := fieldSeen[tag.FieldUUID]; !ok { + selectedFields = append(selectedFields, tag.FieldUUID) + fieldSeen[tag.FieldUUID] = struct{}{} + } + case "name": + if _, ok := fieldSeen[tag.FieldName]; !ok { + selectedFields = append(selectedFields, tag.FieldName) + fieldSeen[tag.FieldName] = struct{}{} + } + case "description": + if _, ok := fieldSeen[tag.FieldDescription]; !ok { + selectedFields = append(selectedFields, tag.FieldDescription) + fieldSeen[tag.FieldDescription] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } } - return t + if !unknownSeen { + t.Select(selectedFields...) + } + return nil } -func (t *TagQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *TagQuery { - return t +type tagPaginateArgs struct { + first, last *int + after, before *Cursor + opts []TagPaginateOption +} + +func newTagPaginateArgs(rv map[string]any) *tagPaginateArgs { + args := &tagPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (t *TeamQuery) CollectFields(ctx context.Context, satisfies ...string) *TeamQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - t = t.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (t *TeamQuery) CollectFields(ctx context.Context, satisfies ...string) (*TeamQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return t, nil + } + if err := t.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return t, nil +} + +func (t *TeamQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(team.Columns)) + selectedFields = []string{team.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "teamtobuild": + var ( + alias = field.Alias + path = append(path, alias) + query = (&BuildClient{config: t.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + t.withTeamToBuild = query + case "teamtostatus": + var ( + alias = field.Alias + path = append(path, alias) + query = (&StatusClient{config: t.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + t.withTeamToStatus = query + case "teamtoprovisionednetwork": + var ( + alias = field.Alias + path = append(path, alias) + query = (&ProvisionedNetworkClient{config: t.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + t.WithNamedTeamToProvisionedNetwork(alias, func(wq *ProvisionedNetworkQuery) { + *wq = *query + }) + case "teamtoplan": + var ( + alias = field.Alias + path = append(path, alias) + query = (&PlanClient{config: t.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + t.withTeamToPlan = query + case "teamNumber": + if _, ok := fieldSeen[team.FieldTeamNumber]; !ok { + selectedFields = append(selectedFields, team.FieldTeamNumber) + fieldSeen[team.FieldTeamNumber] = struct{}{} + } + case "vars": + if _, ok := fieldSeen[team.FieldVars]; !ok { + selectedFields = append(selectedFields, team.FieldVars) + fieldSeen[team.FieldVars] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + t.Select(selectedFields...) } - return t + return nil } -func (t *TeamQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *TeamQuery { - return t +type teamPaginateArgs struct { + first, last *int + after, before *Cursor + opts []TeamPaginateOption +} + +func newTeamPaginateArgs(rv map[string]any) *teamPaginateArgs { + args := &teamPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (t *TokenQuery) CollectFields(ctx context.Context, satisfies ...string) *TokenQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - t = t.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (t *TokenQuery) CollectFields(ctx context.Context, satisfies ...string) (*TokenQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return t, nil } - return t + if err := t.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err + } + return t, nil +} + +func (t *TokenQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(token.Columns)) + selectedFields = []string{token.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "tokentoauthuser": + var ( + alias = field.Alias + path = append(path, alias) + query = (&AuthUserClient{config: t.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + t.withTokenToAuthUser = query + case "token": + if _, ok := fieldSeen[token.FieldToken]; !ok { + selectedFields = append(selectedFields, token.FieldToken) + fieldSeen[token.FieldToken] = struct{}{} + } + case "expireAt": + if _, ok := fieldSeen[token.FieldExpireAt]; !ok { + selectedFields = append(selectedFields, token.FieldExpireAt) + fieldSeen[token.FieldExpireAt] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + t.Select(selectedFields...) + } + return nil +} + +type tokenPaginateArgs struct { + first, last *int + after, before *Cursor + opts []TokenPaginateOption } -func (t *TokenQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *TokenQuery { - return t +func newTokenPaginateArgs(rv map[string]any) *tokenPaginateArgs { + args := &tokenPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args } // CollectFields tells the query-builder to eagerly load connected nodes by resolver context. -func (u *UserQuery) CollectFields(ctx context.Context, satisfies ...string) *UserQuery { - if fc := graphql.GetFieldContext(ctx); fc != nil { - u = u.collectField(graphql.GetOperationContext(ctx), fc.Field, satisfies...) +func (u *UserQuery) CollectFields(ctx context.Context, satisfies ...string) (*UserQuery, error) { + fc := graphql.GetFieldContext(ctx) + if fc == nil { + return u, nil + } + if err := u.collectField(ctx, graphql.GetOperationContext(ctx), fc.Field, nil, satisfies...); err != nil { + return nil, err } - return u + return u, nil } -func (u *UserQuery) collectField(ctx *graphql.OperationContext, field graphql.CollectedField, satisfies ...string) *UserQuery { - return u +func (u *UserQuery) collectField(ctx context.Context, opCtx *graphql.OperationContext, collected graphql.CollectedField, path []string, satisfies ...string) error { + path = append([]string(nil), path...) 
+ var ( + unknownSeen bool + fieldSeen = make(map[string]struct{}, len(user.Columns)) + selectedFields = []string{user.FieldID} + ) + for _, field := range graphql.CollectFields(opCtx, collected.Selections, satisfies) { + switch field.Name { + case "usertotag": + var ( + alias = field.Alias + path = append(path, alias) + query = (&TagClient{config: u.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + u.WithNamedUserToTag(alias, func(wq *TagQuery) { + *wq = *query + }) + case "usertoenvironment": + var ( + alias = field.Alias + path = append(path, alias) + query = (&EnvironmentClient{config: u.config}).Query() + ) + if err := query.collectField(ctx, opCtx, field, path, satisfies...); err != nil { + return err + } + u.WithNamedUserToEnvironment(alias, func(wq *EnvironmentQuery) { + *wq = *query + }) + case "name": + if _, ok := fieldSeen[user.FieldName]; !ok { + selectedFields = append(selectedFields, user.FieldName) + fieldSeen[user.FieldName] = struct{}{} + } + case "uuid": + if _, ok := fieldSeen[user.FieldUUID]; !ok { + selectedFields = append(selectedFields, user.FieldUUID) + fieldSeen[user.FieldUUID] = struct{}{} + } + case "email": + if _, ok := fieldSeen[user.FieldEmail]; !ok { + selectedFields = append(selectedFields, user.FieldEmail) + fieldSeen[user.FieldEmail] = struct{}{} + } + case "hclID": + if _, ok := fieldSeen[user.FieldHCLID]; !ok { + selectedFields = append(selectedFields, user.FieldHCLID) + fieldSeen[user.FieldHCLID] = struct{}{} + } + case "id": + case "__typename": + default: + unknownSeen = true + } + } + if !unknownSeen { + u.Select(selectedFields...) + } + return nil +} + +type userPaginateArgs struct { + first, last *int + after, before *Cursor + opts []UserPaginateOption +} + +func newUserPaginateArgs(rv map[string]any) *userPaginateArgs { + args := &userPaginateArgs{} + if rv == nil { + return args + } + if v := rv[firstField]; v != nil { + args.first = v.(*int) + } + if v := rv[lastField]; v != nil { + args.last = v.(*int) + } + if v := rv[afterField]; v != nil { + args.after = v.(*Cursor) + } + if v := rv[beforeField]; v != nil { + args.before = v.(*Cursor) + } + return args +} + +const ( + afterField = "after" + firstField = "first" + beforeField = "before" + lastField = "last" + orderByField = "orderBy" + directionField = "direction" + fieldField = "field" + whereField = "where" +) + +func fieldArgs(ctx context.Context, whereInput any, path ...string) map[string]any { + field := collectedField(ctx, path...) + if field == nil || field.Arguments == nil { + return nil + } + oc := graphql.GetOperationContext(ctx) + args := field.ArgumentMap(oc.Variables) + return unmarshalArgs(ctx, whereInput, args) +} + +// unmarshalArgs allows extracting the field arguments from their raw representation. 
+func unmarshalArgs(ctx context.Context, whereInput any, args map[string]any) map[string]any { + for _, k := range []string{firstField, lastField} { + v, ok := args[k] + if !ok { + continue + } + i, err := graphql.UnmarshalInt(v) + if err == nil { + args[k] = &i + } + } + for _, k := range []string{beforeField, afterField} { + v, ok := args[k] + if !ok { + continue + } + c := &Cursor{} + if c.UnmarshalGQL(v) == nil { + args[k] = c + } + } + if v, ok := args[whereField]; ok && whereInput != nil { + if err := graphql.UnmarshalInputFromContext(ctx, v, whereInput); err == nil { + args[whereField] = whereInput + } + } + + return args +} + +func limitRows(partitionBy string, limit int, orderBy ...sql.Querier) func(s *sql.Selector) { + return func(s *sql.Selector) { + d := sql.Dialect(s.Dialect()) + s.SetDistinct(false) + with := d.With("src_query"). + As(s.Clone()). + With("limited_query"). + As( + d.Select("*"). + AppendSelectExprAs( + sql.RowNumber().PartitionBy(partitionBy).OrderExpr(orderBy...), + "row_number", + ). + From(d.Table("src_query")), + ) + t := d.Table("limited_query").As(s.TableName()) + *s = *d.Select(s.UnqualifiedColumns()...). + From(t). + Where(sql.LTE(t.C("row_number"), limit)). + Prefix(with) + } +} + +// mayAddCondition appends another type condition to the satisfies list +// if condition is enabled (Node/Nodes) and it does not exist in the list. +func mayAddCondition(satisfies []string, typeCond string) []string { + if len(satisfies) == 0 { + return satisfies + } + for _, s := range satisfies { + if typeCond == s { + return satisfies + } + } + return append(satisfies, typeCond) } diff --git a/ent/gql_edge.go b/ent/gql_edge.go index ac4a3b61..585a56c7 100644 --- a/ent/gql_edge.go +++ b/ent/gql_edge.go @@ -1,19 +1,31 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
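Note on usage (illustrative only, not part of the patch): the regenerated collection helpers above and the edge resolvers below are both driven by the gqlgen field context, and CollectFields now returns an error instead of silently ignoring collection failures. A minimal sketch of how a resolver might wire this, assuming a hypothetical resolvers package and queryResolver type (the ent import path and the Team query builder come from this repository; everything else is a stand-in):

package resolvers // hypothetical package wiring the generated ent + gqlgen code

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// queryResolver is a stand-in for a gqlgen resolver that holds an ent client.
type queryResolver struct {
	client *ent.Client
}

// Teams narrows the query to the requested GraphQL fields before executing it.
func (r *queryResolver) Teams(ctx context.Context) ([]*ent.Team, error) {
	// CollectFields reads the gqlgen field context from ctx and selects only
	// the requested columns and edges; without a field context it returns the
	// query unchanged, so the same code path works outside a GraphQL request.
	q, err := r.client.Team.Query().CollectFields(ctx)
	if err != nil {
		return nil, err
	}
	return q.All(ctx)
}

The new*PaginateArgs helpers generated alongside collectField follow the same shape, pulling first/last/after/before out of the raw argument map for Relay-style pagination before the query is executed.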
package ent -import "context" +import ( + "context" -func (ap *AdhocPlan) PrevAdhocPlan(ctx context.Context) ([]*AdhocPlan, error) { - result, err := ap.Edges.PrevAdhocPlanOrErr() + "github.com/99designs/gqlgen/graphql" +) + +func (ap *AdhocPlan) PrevAdhocPlan(ctx context.Context) (result []*AdhocPlan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = ap.NamedPrevAdhocPlan(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = ap.Edges.PrevAdhocPlanOrErr() + } if IsNotLoaded(err) { result, err = ap.QueryPrevAdhocPlan().All(ctx) } return result, err } -func (ap *AdhocPlan) NextAdhocPlan(ctx context.Context) ([]*AdhocPlan, error) { - result, err := ap.Edges.NextAdhocPlanOrErr() +func (ap *AdhocPlan) NextAdhocPlan(ctx context.Context) (result []*AdhocPlan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = ap.NamedNextAdhocPlan(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = ap.Edges.NextAdhocPlanOrErr() + } if IsNotLoaded(err) { result, err = ap.QueryNextAdhocPlan().All(ctx) } @@ -84,16 +96,24 @@ func (at *AgentTask) AgentTaskToProvisionedHost(ctx context.Context) (*Provision return result, err } -func (at *AgentTask) AgentTaskToAdhocPlan(ctx context.Context) ([]*AdhocPlan, error) { - result, err := at.Edges.AgentTaskToAdhocPlanOrErr() +func (at *AgentTask) AgentTaskToAdhocPlan(ctx context.Context) (result []*AdhocPlan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = at.NamedAgentTaskToAdhocPlan(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = at.Edges.AgentTaskToAdhocPlanOrErr() + } if IsNotLoaded(err) { result, err = at.QueryAgentTaskToAdhocPlan().All(ctx) } return result, err } -func (a *Ansible) AnsibleToUser(ctx context.Context) ([]*User, error) { - result, err := a.Edges.AnsibleToUserOrErr() +func (a *Ansible) AnsibleToUser(ctx context.Context) (result []*User, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = a.NamedAnsibleToUser(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = a.Edges.AnsibleToUserOrErr() + } if IsNotLoaded(err) { result, err = a.QueryAnsibleToUser().All(ctx) } @@ -108,16 +128,24 @@ func (a *Ansible) AnsibleFromEnvironment(ctx context.Context) (*Environment, err return result, MaskNotFound(err) } -func (au *AuthUser) AuthUserToToken(ctx context.Context) ([]*Token, error) { - result, err := au.Edges.AuthUserToTokenOrErr() +func (au *AuthUser) AuthUserToToken(ctx context.Context) (result []*Token, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = au.NamedAuthUserToToken(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = au.Edges.AuthUserToTokenOrErr() + } if IsNotLoaded(err) { result, err = au.QueryAuthUserToToken().All(ctx) } return result, err } -func (au *AuthUser) AuthUserToServerTasks(ctx context.Context) ([]*ServerTask, error) { - result, err := au.Edges.AuthUserToServerTasksOrErr() +func (au *AuthUser) AuthUserToServerTasks(ctx context.Context) (result []*ServerTask, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = au.NamedAuthUserToServerTasks(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = au.Edges.AuthUserToServerTasksOrErr() + } if IsNotLoaded(err) { result, err = au.QueryAuthUserToServerTasks().All(ctx) } @@ 
-164,56 +192,84 @@ func (b *Build) BuildToRepoCommit(ctx context.Context) (*RepoCommit, error) { return result, MaskNotFound(err) } -func (b *Build) BuildToProvisionedNetwork(ctx context.Context) ([]*ProvisionedNetwork, error) { - result, err := b.Edges.BuildToProvisionedNetworkOrErr() +func (b *Build) BuildToProvisionedNetwork(ctx context.Context) (result []*ProvisionedNetwork, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToProvisionedNetwork(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToProvisionedNetworkOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToProvisionedNetwork().All(ctx) } return result, err } -func (b *Build) BuildToTeam(ctx context.Context) ([]*Team, error) { - result, err := b.Edges.BuildToTeamOrErr() +func (b *Build) BuildToTeam(ctx context.Context) (result []*Team, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToTeam(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToTeamOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToTeam().All(ctx) } return result, err } -func (b *Build) BuildToPlan(ctx context.Context) ([]*Plan, error) { - result, err := b.Edges.BuildToPlanOrErr() +func (b *Build) BuildToPlan(ctx context.Context) (result []*Plan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToPlan(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToPlanOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToPlan().All(ctx) } return result, err } -func (b *Build) BuildToBuildCommits(ctx context.Context) ([]*BuildCommit, error) { - result, err := b.Edges.BuildToBuildCommitsOrErr() +func (b *Build) BuildToBuildCommits(ctx context.Context) (result []*BuildCommit, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToBuildCommits(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToBuildCommitsOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToBuildCommits().All(ctx) } return result, err } -func (b *Build) BuildToAdhocPlans(ctx context.Context) ([]*AdhocPlan, error) { - result, err := b.Edges.BuildToAdhocPlansOrErr() +func (b *Build) BuildToAdhocPlans(ctx context.Context) (result []*AdhocPlan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToAdhocPlans(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToAdhocPlansOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToAdhocPlans().All(ctx) } return result, err } -func (b *Build) BuildToAgentStatuses(ctx context.Context) ([]*AgentStatus, error) { - result, err := b.Edges.BuildToAgentStatusesOrErr() +func (b *Build) BuildToAgentStatuses(ctx context.Context) (result []*AgentStatus, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToAgentStatuses(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToAgentStatusesOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToAgentStatuses().All(ctx) } return result, err } -func (b *Build) BuildToServerTasks(ctx context.Context) ([]*ServerTask, error) { - result, err := b.Edges.BuildToServerTasksOrErr() +func (b *Build) 
BuildToServerTasks(ctx context.Context) (result []*ServerTask, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = b.NamedBuildToServerTasks(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = b.Edges.BuildToServerTasksOrErr() + } if IsNotLoaded(err) { result, err = b.QueryBuildToServerTasks().All(ctx) } @@ -228,24 +284,36 @@ func (bc *BuildCommit) BuildCommitToBuild(ctx context.Context) (*Build, error) { return result, err } -func (bc *BuildCommit) BuildCommitToServerTask(ctx context.Context) ([]*ServerTask, error) { - result, err := bc.Edges.BuildCommitToServerTaskOrErr() +func (bc *BuildCommit) BuildCommitToServerTask(ctx context.Context) (result []*ServerTask, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = bc.NamedBuildCommitToServerTask(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = bc.Edges.BuildCommitToServerTaskOrErr() + } if IsNotLoaded(err) { result, err = bc.QueryBuildCommitToServerTask().All(ctx) } return result, err } -func (bc *BuildCommit) BuildCommitToPlanDiffs(ctx context.Context) ([]*PlanDiff, error) { - result, err := bc.Edges.BuildCommitToPlanDiffsOrErr() +func (bc *BuildCommit) BuildCommitToPlanDiffs(ctx context.Context) (result []*PlanDiff, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = bc.NamedBuildCommitToPlanDiffs(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = bc.Edges.BuildCommitToPlanDiffsOrErr() + } if IsNotLoaded(err) { result, err = bc.QueryBuildCommitToPlanDiffs().All(ctx) } return result, err } -func (c *Command) CommandToUser(ctx context.Context) ([]*User, error) { - result, err := c.Edges.CommandToUserOrErr() +func (c *Command) CommandToUser(ctx context.Context) (result []*User, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = c.NamedCommandToUser(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = c.Edges.CommandToUserOrErr() + } if IsNotLoaded(err) { result, err = c.QueryCommandToUser().All(ctx) } @@ -260,8 +328,12 @@ func (c *Command) CommandToEnvironment(ctx context.Context) (*Environment, error return result, MaskNotFound(err) } -func (c *Competition) CompetitionToDNS(ctx context.Context) ([]*DNS, error) { - result, err := c.Edges.CompetitionToDNSOrErr() +func (c *Competition) CompetitionToDNS(ctx context.Context) (result []*DNS, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = c.NamedCompetitionToDNS(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = c.Edges.CompetitionToDNSOrErr() + } if IsNotLoaded(err) { result, err = c.QueryCompetitionToDNS().All(ctx) } @@ -276,24 +348,36 @@ func (c *Competition) CompetitionToEnvironment(ctx context.Context) (*Environmen return result, MaskNotFound(err) } -func (c *Competition) CompetitionToBuild(ctx context.Context) ([]*Build, error) { - result, err := c.Edges.CompetitionToBuildOrErr() +func (c *Competition) CompetitionToBuild(ctx context.Context) (result []*Build, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = c.NamedCompetitionToBuild(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = c.Edges.CompetitionToBuildOrErr() + } if IsNotLoaded(err) { result, err = c.QueryCompetitionToBuild().All(ctx) } return result, err } -func (d *DNS) DNSToEnvironment(ctx 
context.Context) ([]*Environment, error) { - result, err := d.Edges.DNSToEnvironmentOrErr() +func (d *DNS) DNSToEnvironment(ctx context.Context) (result []*Environment, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = d.NamedDNSToEnvironment(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = d.Edges.DNSToEnvironmentOrErr() + } if IsNotLoaded(err) { result, err = d.QueryDNSToEnvironment().All(ctx) } return result, err } -func (d *DNS) DNSToCompetition(ctx context.Context) ([]*Competition, error) { - result, err := d.Edges.DNSToCompetitionOrErr() +func (d *DNS) DNSToCompetition(ctx context.Context) (result []*Competition, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = d.NamedDNSToCompetition(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = d.Edges.DNSToCompetitionOrErr() + } if IsNotLoaded(err) { result, err = d.QueryDNSToCompetition().All(ctx) } @@ -316,152 +400,228 @@ func (d *Disk) DiskToHost(ctx context.Context) (*Host, error) { return result, MaskNotFound(err) } -func (e *Environment) EnvironmentToUser(ctx context.Context) ([]*User, error) { - result, err := e.Edges.EnvironmentToUserOrErr() +func (e *Environment) EnvironmentToUser(ctx context.Context) (result []*User, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToUser(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToUserOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToUser().All(ctx) } return result, err } -func (e *Environment) EnvironmentToHost(ctx context.Context) ([]*Host, error) { - result, err := e.Edges.EnvironmentToHostOrErr() +func (e *Environment) EnvironmentToHost(ctx context.Context) (result []*Host, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToHost(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToHostOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToHost().All(ctx) } return result, err } -func (e *Environment) EnvironmentToCompetition(ctx context.Context) ([]*Competition, error) { - result, err := e.Edges.EnvironmentToCompetitionOrErr() +func (e *Environment) EnvironmentToCompetition(ctx context.Context) (result []*Competition, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToCompetition(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToCompetitionOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToCompetition().All(ctx) } return result, err } -func (e *Environment) EnvironmentToIdentity(ctx context.Context) ([]*Identity, error) { - result, err := e.Edges.EnvironmentToIdentityOrErr() +func (e *Environment) EnvironmentToIdentity(ctx context.Context) (result []*Identity, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToIdentity(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToIdentityOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToIdentity().All(ctx) } return result, err } -func (e *Environment) EnvironmentToCommand(ctx context.Context) ([]*Command, error) { - result, err := e.Edges.EnvironmentToCommandOrErr() +func (e *Environment) 
EnvironmentToCommand(ctx context.Context) (result []*Command, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToCommand(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToCommandOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToCommand().All(ctx) } return result, err } -func (e *Environment) EnvironmentToScript(ctx context.Context) ([]*Script, error) { - result, err := e.Edges.EnvironmentToScriptOrErr() +func (e *Environment) EnvironmentToScript(ctx context.Context) (result []*Script, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToScript(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToScriptOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToScript().All(ctx) } return result, err } -func (e *Environment) EnvironmentToFileDownload(ctx context.Context) ([]*FileDownload, error) { - result, err := e.Edges.EnvironmentToFileDownloadOrErr() +func (e *Environment) EnvironmentToFileDownload(ctx context.Context) (result []*FileDownload, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToFileDownload(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToFileDownloadOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToFileDownload().All(ctx) } return result, err } -func (e *Environment) EnvironmentToFileDelete(ctx context.Context) ([]*FileDelete, error) { - result, err := e.Edges.EnvironmentToFileDeleteOrErr() +func (e *Environment) EnvironmentToFileDelete(ctx context.Context) (result []*FileDelete, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToFileDelete(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToFileDeleteOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToFileDelete().All(ctx) } return result, err } -func (e *Environment) EnvironmentToFileExtract(ctx context.Context) ([]*FileExtract, error) { - result, err := e.Edges.EnvironmentToFileExtractOrErr() +func (e *Environment) EnvironmentToFileExtract(ctx context.Context) (result []*FileExtract, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToFileExtract(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToFileExtractOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToFileExtract().All(ctx) } return result, err } -func (e *Environment) EnvironmentToIncludedNetwork(ctx context.Context) ([]*IncludedNetwork, error) { - result, err := e.Edges.EnvironmentToIncludedNetworkOrErr() +func (e *Environment) EnvironmentToIncludedNetwork(ctx context.Context) (result []*IncludedNetwork, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToIncludedNetwork(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToIncludedNetworkOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToIncludedNetwork().All(ctx) } return result, err } -func (e *Environment) EnvironmentToFinding(ctx context.Context) ([]*Finding, error) { - result, err := e.Edges.EnvironmentToFindingOrErr() +func (e *Environment) 
EnvironmentToFinding(ctx context.Context) (result []*Finding, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToFinding(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToFindingOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToFinding().All(ctx) } return result, err } -func (e *Environment) EnvironmentToDNSRecord(ctx context.Context) ([]*DNSRecord, error) { - result, err := e.Edges.EnvironmentToDNSRecordOrErr() +func (e *Environment) EnvironmentToDNSRecord(ctx context.Context) (result []*DNSRecord, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToDNSRecord(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToDNSRecordOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToDNSRecord().All(ctx) } return result, err } -func (e *Environment) EnvironmentToDNS(ctx context.Context) ([]*DNS, error) { - result, err := e.Edges.EnvironmentToDNSOrErr() +func (e *Environment) EnvironmentToDNS(ctx context.Context) (result []*DNS, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToDNS(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToDNSOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToDNS().All(ctx) } return result, err } -func (e *Environment) EnvironmentToNetwork(ctx context.Context) ([]*Network, error) { - result, err := e.Edges.EnvironmentToNetworkOrErr() +func (e *Environment) EnvironmentToNetwork(ctx context.Context) (result []*Network, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToNetwork(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToNetworkOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToNetwork().All(ctx) } return result, err } -func (e *Environment) EnvironmentToHostDependency(ctx context.Context) ([]*HostDependency, error) { - result, err := e.Edges.EnvironmentToHostDependencyOrErr() +func (e *Environment) EnvironmentToHostDependency(ctx context.Context) (result []*HostDependency, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToHostDependency(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToHostDependencyOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToHostDependency().All(ctx) } return result, err } -func (e *Environment) EnvironmentToAnsible(ctx context.Context) ([]*Ansible, error) { - result, err := e.Edges.EnvironmentToAnsibleOrErr() +func (e *Environment) EnvironmentToAnsible(ctx context.Context) (result []*Ansible, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToAnsible(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToAnsibleOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToAnsible().All(ctx) } return result, err } -func (e *Environment) EnvironmentToBuild(ctx context.Context) ([]*Build, error) { - result, err := e.Edges.EnvironmentToBuildOrErr() +func (e *Environment) EnvironmentToBuild(ctx context.Context) (result []*Build, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && 
fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToBuild(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToBuildOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToBuild().All(ctx) } return result, err } -func (e *Environment) EnvironmentToRepository(ctx context.Context) ([]*Repository, error) { - result, err := e.Edges.EnvironmentToRepositoryOrErr() +func (e *Environment) EnvironmentToRepository(ctx context.Context) (result []*Repository, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToRepository(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToRepositoryOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToRepository().All(ctx) } return result, err } -func (e *Environment) EnvironmentToServerTask(ctx context.Context) ([]*ServerTask, error) { - result, err := e.Edges.EnvironmentToServerTaskOrErr() +func (e *Environment) EnvironmentToServerTask(ctx context.Context) (result []*ServerTask, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = e.NamedEnvironmentToServerTask(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = e.Edges.EnvironmentToServerTaskOrErr() + } if IsNotLoaded(err) { result, err = e.QueryEnvironmentToServerTask().All(ctx) } @@ -492,8 +652,12 @@ func (fe *FileExtract) FileExtractToEnvironment(ctx context.Context) (*Environme return result, MaskNotFound(err) } -func (f *Finding) FindingToUser(ctx context.Context) ([]*User, error) { - result, err := f.Edges.FindingToUserOrErr() +func (f *Finding) FindingToUser(ctx context.Context) (result []*User, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = f.NamedFindingToUser(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = f.Edges.FindingToUserOrErr() + } if IsNotLoaded(err) { result, err = f.QueryFindingToUser().All(ctx) } @@ -548,8 +712,12 @@ func (h *Host) HostToDisk(ctx context.Context) (*Disk, error) { return result, MaskNotFound(err) } -func (h *Host) HostToUser(ctx context.Context) ([]*User, error) { - result, err := h.Edges.HostToUserOrErr() +func (h *Host) HostToUser(ctx context.Context) (result []*User, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = h.NamedHostToUser(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = h.Edges.HostToUserOrErr() + } if IsNotLoaded(err) { result, err = h.QueryHostToUser().All(ctx) } @@ -564,24 +732,36 @@ func (h *Host) HostToEnvironment(ctx context.Context) (*Environment, error) { return result, MaskNotFound(err) } -func (h *Host) HostToIncludedNetwork(ctx context.Context) ([]*IncludedNetwork, error) { - result, err := h.Edges.HostToIncludedNetworkOrErr() +func (h *Host) HostToIncludedNetwork(ctx context.Context) (result []*IncludedNetwork, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = h.NamedHostToIncludedNetwork(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = h.Edges.HostToIncludedNetworkOrErr() + } if IsNotLoaded(err) { result, err = h.QueryHostToIncludedNetwork().All(ctx) } return result, err } -func (h *Host) DependOnHostToHostDependency(ctx context.Context) ([]*HostDependency, error) { - result, err := h.Edges.DependOnHostToHostDependencyOrErr() +func (h *Host) DependOnHostToHostDependency(ctx 
context.Context) (result []*HostDependency, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = h.NamedDependOnHostToHostDependency(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = h.Edges.DependOnHostToHostDependencyOrErr() + } if IsNotLoaded(err) { result, err = h.QueryDependOnHostToHostDependency().All(ctx) } return result, err } -func (h *Host) DependByHostToHostDependency(ctx context.Context) ([]*HostDependency, error) { - result, err := h.Edges.DependByHostToHostDependencyOrErr() +func (h *Host) DependByHostToHostDependency(ctx context.Context) (result []*HostDependency, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = h.NamedDependByHostToHostDependency(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = h.Edges.DependByHostToHostDependencyOrErr() + } if IsNotLoaded(err) { result, err = h.QueryDependByHostToHostDependency().All(ctx) } @@ -628,16 +808,24 @@ func (i *Identity) IdentityToEnvironment(ctx context.Context) (*Environment, err return result, MaskNotFound(err) } -func (in *IncludedNetwork) IncludedNetworkToTag(ctx context.Context) ([]*Tag, error) { - result, err := in.Edges.IncludedNetworkToTagOrErr() +func (in *IncludedNetwork) IncludedNetworkToTag(ctx context.Context) (result []*Tag, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = in.NamedIncludedNetworkToTag(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = in.Edges.IncludedNetworkToTagOrErr() + } if IsNotLoaded(err) { result, err = in.QueryIncludedNetworkToTag().All(ctx) } return result, err } -func (in *IncludedNetwork) IncludedNetworkToHost(ctx context.Context) ([]*Host, error) { - result, err := in.Edges.IncludedNetworkToHostOrErr() +func (in *IncludedNetwork) IncludedNetworkToHost(ctx context.Context) (result []*Host, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = in.NamedIncludedNetworkToHost(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = in.Edges.IncludedNetworkToHostOrErr() + } if IsNotLoaded(err) { result, err = in.QueryIncludedNetworkToHost().All(ctx) } @@ -652,8 +840,12 @@ func (in *IncludedNetwork) IncludedNetworkToNetwork(ctx context.Context) (*Netwo return result, MaskNotFound(err) } -func (in *IncludedNetwork) IncludedNetworkToEnvironment(ctx context.Context) ([]*Environment, error) { - result, err := in.Edges.IncludedNetworkToEnvironmentOrErr() +func (in *IncludedNetwork) IncludedNetworkToEnvironment(ctx context.Context) (result []*Environment, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = in.NamedIncludedNetworkToEnvironment(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = in.Edges.IncludedNetworkToEnvironmentOrErr() + } if IsNotLoaded(err) { result, err = in.QueryIncludedNetworkToEnvironment().All(ctx) } @@ -668,32 +860,48 @@ func (n *Network) NetworkToEnvironment(ctx context.Context) (*Environment, error return result, MaskNotFound(err) } -func (n *Network) NetworkToHostDependency(ctx context.Context) ([]*HostDependency, error) { - result, err := n.Edges.NetworkToHostDependencyOrErr() +func (n *Network) NetworkToHostDependency(ctx context.Context) (result []*HostDependency, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = 
n.NamedNetworkToHostDependency(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = n.Edges.NetworkToHostDependencyOrErr() + } if IsNotLoaded(err) { result, err = n.QueryNetworkToHostDependency().All(ctx) } return result, err } -func (n *Network) NetworkToIncludedNetwork(ctx context.Context) ([]*IncludedNetwork, error) { - result, err := n.Edges.NetworkToIncludedNetworkOrErr() +func (n *Network) NetworkToIncludedNetwork(ctx context.Context) (result []*IncludedNetwork, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = n.NamedNetworkToIncludedNetwork(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = n.Edges.NetworkToIncludedNetworkOrErr() + } if IsNotLoaded(err) { result, err = n.QueryNetworkToIncludedNetwork().All(ctx) } return result, err } -func (pl *Plan) PrevPlan(ctx context.Context) ([]*Plan, error) { - result, err := pl.Edges.PrevPlanOrErr() +func (pl *Plan) PrevPlan(ctx context.Context) (result []*Plan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = pl.NamedPrevPlan(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = pl.Edges.PrevPlanOrErr() + } if IsNotLoaded(err) { result, err = pl.QueryPrevPlan().All(ctx) } return result, err } -func (pl *Plan) NextPlan(ctx context.Context) ([]*Plan, error) { - result, err := pl.Edges.NextPlanOrErr() +func (pl *Plan) NextPlan(ctx context.Context) (result []*Plan, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = pl.NamedNextPlan(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = pl.Edges.NextPlanOrErr() + } if IsNotLoaded(err) { result, err = pl.QueryNextPlan().All(ctx) } @@ -748,8 +956,12 @@ func (pl *Plan) PlanToStatus(ctx context.Context) (*Status, error) { return result, err } -func (pl *Plan) PlanToPlanDiffs(ctx context.Context) ([]*PlanDiff, error) { - result, err := pl.Edges.PlanToPlanDiffsOrErr() +func (pl *Plan) PlanToPlanDiffs(ctx context.Context) (result []*PlanDiff, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = pl.NamedPlanToPlanDiffs(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = pl.Edges.PlanToPlanDiffsOrErr() + } if IsNotLoaded(err) { result, err = pl.QueryPlanToPlanDiffs().All(ctx) } @@ -812,24 +1024,36 @@ func (ph *ProvisionedHost) ProvisionedHostToBuild(ctx context.Context) (*Build, return result, err } -func (ph *ProvisionedHost) ProvisionedHostToProvisioningStep(ctx context.Context) ([]*ProvisioningStep, error) { - result, err := ph.Edges.ProvisionedHostToProvisioningStepOrErr() +func (ph *ProvisionedHost) ProvisionedHostToProvisioningStep(ctx context.Context) (result []*ProvisioningStep, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = ph.NamedProvisionedHostToProvisioningStep(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = ph.Edges.ProvisionedHostToProvisioningStepOrErr() + } if IsNotLoaded(err) { result, err = ph.QueryProvisionedHostToProvisioningStep().All(ctx) } return result, err } -func (ph *ProvisionedHost) ProvisionedHostToAgentStatus(ctx context.Context) ([]*AgentStatus, error) { - result, err := ph.Edges.ProvisionedHostToAgentStatusOrErr() +func (ph *ProvisionedHost) ProvisionedHostToAgentStatus(ctx context.Context) (result []*AgentStatus, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != 
"" { + result, err = ph.NamedProvisionedHostToAgentStatus(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = ph.Edges.ProvisionedHostToAgentStatusOrErr() + } if IsNotLoaded(err) { result, err = ph.QueryProvisionedHostToAgentStatus().All(ctx) } return result, err } -func (ph *ProvisionedHost) ProvisionedHostToAgentTask(ctx context.Context) ([]*AgentTask, error) { - result, err := ph.Edges.ProvisionedHostToAgentTaskOrErr() +func (ph *ProvisionedHost) ProvisionedHostToAgentTask(ctx context.Context) (result []*AgentTask, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = ph.NamedProvisionedHostToAgentTask(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = ph.Edges.ProvisionedHostToAgentTaskOrErr() + } if IsNotLoaded(err) { result, err = ph.QueryProvisionedHostToAgentTask().All(ctx) } @@ -884,8 +1108,12 @@ func (pn *ProvisionedNetwork) ProvisionedNetworkToTeam(ctx context.Context) (*Te return result, MaskNotFound(err) } -func (pn *ProvisionedNetwork) ProvisionedNetworkToProvisionedHost(ctx context.Context) ([]*ProvisionedHost, error) { - result, err := pn.Edges.ProvisionedNetworkToProvisionedHostOrErr() +func (pn *ProvisionedNetwork) ProvisionedNetworkToProvisionedHost(ctx context.Context) (result []*ProvisionedHost, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = pn.NamedProvisionedNetworkToProvisionedHost(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = pn.Edges.ProvisionedNetworkToProvisionedHostOrErr() + } if IsNotLoaded(err) { result, err = pn.QueryProvisionedNetworkToProvisionedHost().All(ctx) } @@ -980,8 +1208,12 @@ func (ps *ProvisioningStep) ProvisioningStepToPlan(ctx context.Context) (*Plan, return result, MaskNotFound(err) } -func (ps *ProvisioningStep) ProvisioningStepToAgentTask(ctx context.Context) ([]*AgentTask, error) { - result, err := ps.Edges.ProvisioningStepToAgentTaskOrErr() +func (ps *ProvisioningStep) ProvisioningStepToAgentTask(ctx context.Context) (result []*AgentTask, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = ps.NamedProvisioningStepToAgentTask(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = ps.Edges.ProvisioningStepToAgentTaskOrErr() + } if IsNotLoaded(err) { result, err = ps.QueryProvisioningStepToAgentTask().All(ctx) } @@ -1004,32 +1236,48 @@ func (rc *RepoCommit) RepoCommitToRepository(ctx context.Context) (*Repository, return result, MaskNotFound(err) } -func (r *Repository) RepositoryToEnvironment(ctx context.Context) ([]*Environment, error) { - result, err := r.Edges.RepositoryToEnvironmentOrErr() +func (r *Repository) RepositoryToEnvironment(ctx context.Context) (result []*Environment, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = r.NamedRepositoryToEnvironment(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = r.Edges.RepositoryToEnvironmentOrErr() + } if IsNotLoaded(err) { result, err = r.QueryRepositoryToEnvironment().All(ctx) } return result, err } -func (r *Repository) RepositoryToRepoCommit(ctx context.Context) ([]*RepoCommit, error) { - result, err := r.Edges.RepositoryToRepoCommitOrErr() +func (r *Repository) RepositoryToRepoCommit(ctx context.Context) (result []*RepoCommit, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = 
r.NamedRepositoryToRepoCommit(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = r.Edges.RepositoryToRepoCommitOrErr() + } if IsNotLoaded(err) { result, err = r.QueryRepositoryToRepoCommit().All(ctx) } return result, err } -func (s *Script) ScriptToUser(ctx context.Context) ([]*User, error) { - result, err := s.Edges.ScriptToUserOrErr() +func (s *Script) ScriptToUser(ctx context.Context) (result []*User, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = s.NamedScriptToUser(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = s.Edges.ScriptToUserOrErr() + } if IsNotLoaded(err) { result, err = s.QueryScriptToUser().All(ctx) } return result, err } -func (s *Script) ScriptToFinding(ctx context.Context) ([]*Finding, error) { - result, err := s.Edges.ScriptToFindingOrErr() +func (s *Script) ScriptToFinding(ctx context.Context) (result []*Finding, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = s.NamedScriptToFinding(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = s.Edges.ScriptToFindingOrErr() + } if IsNotLoaded(err) { result, err = s.QueryScriptToFinding().All(ctx) } @@ -1084,8 +1332,12 @@ func (st *ServerTask) ServerTaskToBuildCommit(ctx context.Context) (*BuildCommit return result, MaskNotFound(err) } -func (st *ServerTask) ServerTaskToGinFileMiddleware(ctx context.Context) ([]*GinFileMiddleware, error) { - result, err := st.Edges.ServerTaskToGinFileMiddlewareOrErr() +func (st *ServerTask) ServerTaskToGinFileMiddleware(ctx context.Context) (result []*GinFileMiddleware, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = st.NamedServerTaskToGinFileMiddleware(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = st.Edges.ServerTaskToGinFileMiddlewareOrErr() + } if IsNotLoaded(err) { result, err = st.QueryServerTaskToGinFileMiddleware().All(ctx) } @@ -1172,8 +1424,12 @@ func (t *Team) TeamToStatus(ctx context.Context) (*Status, error) { return result, MaskNotFound(err) } -func (t *Team) TeamToProvisionedNetwork(ctx context.Context) ([]*ProvisionedNetwork, error) { - result, err := t.Edges.TeamToProvisionedNetworkOrErr() +func (t *Team) TeamToProvisionedNetwork(ctx context.Context) (result []*ProvisionedNetwork, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = t.NamedTeamToProvisionedNetwork(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = t.Edges.TeamToProvisionedNetworkOrErr() + } if IsNotLoaded(err) { result, err = t.QueryTeamToProvisionedNetwork().All(ctx) } @@ -1196,16 +1452,24 @@ func (t *Token) TokenToAuthUser(ctx context.Context) (*AuthUser, error) { return result, err } -func (u *User) UserToTag(ctx context.Context) ([]*Tag, error) { - result, err := u.Edges.UserToTagOrErr() +func (u *User) UserToTag(ctx context.Context) (result []*Tag, err error) { + if fc := graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = u.NamedUserToTag(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = u.Edges.UserToTagOrErr() + } if IsNotLoaded(err) { result, err = u.QueryUserToTag().All(ctx) } return result, err } -func (u *User) UserToEnvironment(ctx context.Context) ([]*Environment, error) { - result, err := u.Edges.UserToEnvironmentOrErr() +func (u *User) UserToEnvironment(ctx context.Context) (result []*Environment, err error) { + if fc := 
graphql.GetFieldContext(ctx); fc != nil && fc.Field.Alias != "" { + result, err = u.NamedUserToEnvironment(graphql.GetFieldContext(ctx).Field.Alias) + } else { + result, err = u.Edges.UserToEnvironmentOrErr() + } if IsNotLoaded(err) { result, err = u.QueryUserToEnvironment().All(ctx) } diff --git a/ent/gql_node.go b/ent/gql_node.go index 37e30dfb..51346635 100644 --- a/ent/gql_node.go +++ b/ent/gql_node.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "encoding/json" "fmt" "entgo.io/contrib/entgql" @@ -52,3642 +51,119 @@ import ( // Noder wraps the basic Node method. type Noder interface { - Node(context.Context) (*Node, error) + IsNode() } -// Node in the graph. -type Node struct { - ID uuid.UUID `json:"id,omitempty"` // node id. - Type string `json:"type,omitempty"` // node type. - Fields []*Field `json:"fields,omitempty"` // node fields. - Edges []*Edge `json:"edges,omitempty"` // node edges. -} +// IsNode implements the Node interface check for GQLGen. +func (n *AdhocPlan) IsNode() {} -// Field of a node. -type Field struct { - Type string `json:"type,omitempty"` // field type. - Name string `json:"name,omitempty"` // field name (as in struct). - Value string `json:"value,omitempty"` // stringified value. -} +// IsNode implements the Node interface check for GQLGen. +func (n *AgentStatus) IsNode() {} -// Edges between two nodes. -type Edge struct { - Type string `json:"type,omitempty"` // edge type. - Name string `json:"name,omitempty"` // edge name. - IDs []uuid.UUID `json:"ids,omitempty"` // node ids (where this edge point to). -} +// IsNode implements the Node interface check for GQLGen. +func (n *AgentTask) IsNode() {} -func (ap *AdhocPlan) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: ap.ID, - Type: "AdhocPlan", - Fields: make([]*Field, 0), - Edges: make([]*Edge, 5), - } - node.Edges[0] = &Edge{ - Type: "AdhocPlan", - Name: "PrevAdhocPlan", - } - err = ap.QueryPrevAdhocPlan(). - Select(adhocplan.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "AdhocPlan", - Name: "NextAdhocPlan", - } - err = ap.QueryNextAdhocPlan(). - Select(adhocplan.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Build", - Name: "AdhocPlanToBuild", - } - err = ap.QueryAdhocPlanToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Status", - Name: "AdhocPlanToStatus", - } - err = ap.QueryAdhocPlanToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "AgentTask", - Name: "AdhocPlanToAgentTask", - } - err = ap.QueryAdhocPlanToAgentTask(). - Select(agenttask.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. 
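The generated edge resolvers above all follow the same new pattern: when the gqlgen field context carries an alias, the resolver reads the per-alias eager-loaded slice through the generated Named* getter (for example NamedEnvironmentToBuild), otherwise it falls back to the plain edge, and if neither was loaded it still queries the edge directly. Below is a minimal usage sketch of how those per-alias loads would be populated on the query side, assuming ent's named-edges feature and a generated WithNamedEnvironmentToBuild builder method; apart from the Named getter used above, the names here are assumptions, not taken from this diff.

// Hypothetical sketch, not part of this patch. An aliased GraphQL selection
// such as
//
//	environment(...) {
//	  done:    EnvironmentToBuild { id }
//	  pending: EnvironmentToBuild { id }
//	}
//
// can be eager-loaded once per alias; the resolver above then serves each
// alias from NamedEnvironmentToBuild(alias) instead of the shared edge.
package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/build"
	"github.com/gen0cide/laforge/ent/environment"
	"github.com/google/uuid"
)

func loadEnvironmentWithAliasedBuilds(ctx context.Context, client *ent.Client, id uuid.UUID) (*ent.Environment, error) {
	return client.Environment.Query().
		Where(environment.ID(id)).
		// Eager-load completed builds under the "done" alias.
		WithNamedEnvironmentToBuild("done", func(q *ent.BuildQuery) {
			q.Where(build.CompletedPlan(true))
		}).
		// Eager-load the remaining builds under the "pending" alias.
		WithNamedEnvironmentToBuild("pending", func(q *ent.BuildQuery) {
			q.Where(build.CompletedPlan(false))
		}).
		Only(ctx)
}

Because the resolvers keep the QueryEnvironmentToBuild().All(ctx) fallback, skipping these eager loads degrades to an extra query per field rather than an error.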
+func (n *Ansible) IsNode() {} -func (as *AgentStatus) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: as.ID, - Type: "AgentStatus", - Fields: make([]*Field, 14), - Edges: make([]*Edge, 3), - } - var buf []byte - if buf, err = json.Marshal(as.ClientID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "ClientID", - Value: string(buf), - } - if buf, err = json.Marshal(as.Hostname); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "Hostname", - Value: string(buf), - } - if buf, err = json.Marshal(as.UpTime); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "int64", - Name: "UpTime", - Value: string(buf), - } - if buf, err = json.Marshal(as.BootTime); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "int64", - Name: "BootTime", - Value: string(buf), - } - if buf, err = json.Marshal(as.NumProcs); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "int64", - Name: "NumProcs", - Value: string(buf), - } - if buf, err = json.Marshal(as.Os); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "Os", - Value: string(buf), - } - if buf, err = json.Marshal(as.HostID); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "string", - Name: "HostID", - Value: string(buf), - } - if buf, err = json.Marshal(as.Load1); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "float64", - Name: "Load1", - Value: string(buf), - } - if buf, err = json.Marshal(as.Load5); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "float64", - Name: "Load5", - Value: string(buf), - } - if buf, err = json.Marshal(as.Load15); err != nil { - return nil, err - } - node.Fields[9] = &Field{ - Type: "float64", - Name: "Load15", - Value: string(buf), - } - if buf, err = json.Marshal(as.TotalMem); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "int64", - Name: "TotalMem", - Value: string(buf), - } - if buf, err = json.Marshal(as.FreeMem); err != nil { - return nil, err - } - node.Fields[11] = &Field{ - Type: "int64", - Name: "FreeMem", - Value: string(buf), - } - if buf, err = json.Marshal(as.UsedMem); err != nil { - return nil, err - } - node.Fields[12] = &Field{ - Type: "int64", - Name: "UsedMem", - Value: string(buf), - } - if buf, err = json.Marshal(as.Timestamp); err != nil { - return nil, err - } - node.Fields[13] = &Field{ - Type: "int64", - Name: "Timestamp", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "ProvisionedHost", - Name: "AgentStatusToProvisionedHost", - } - err = as.QueryAgentStatusToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ProvisionedNetwork", - Name: "AgentStatusToProvisionedNetwork", - } - err = as.QueryAgentStatusToProvisionedNetwork(). - Select(provisionednetwork.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Build", - Name: "AgentStatusToBuild", - } - err = as.QueryAgentStatusToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. 
+func (n *AuthUser) IsNode() {} -func (at *AgentTask) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: at.ID, - Type: "AgentTask", - Fields: make([]*Field, 6), - Edges: make([]*Edge, 3), - } - var buf []byte - if buf, err = json.Marshal(at.Command); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "agenttask.Command", - Name: "command", - Value: string(buf), - } - if buf, err = json.Marshal(at.Args); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "args", - Value: string(buf), - } - if buf, err = json.Marshal(at.Number); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "int", - Name: "number", - Value: string(buf), - } - if buf, err = json.Marshal(at.Output); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "output", - Value: string(buf), - } - if buf, err = json.Marshal(at.State); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "agenttask.State", - Name: "state", - Value: string(buf), - } - if buf, err = json.Marshal(at.ErrorMessage); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "error_message", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "ProvisioningStep", - Name: "AgentTaskToProvisioningStep", - } - err = at.QueryAgentTaskToProvisioningStep(). - Select(provisioningstep.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ProvisionedHost", - Name: "AgentTaskToProvisionedHost", - } - err = at.QueryAgentTaskToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "AdhocPlan", - Name: "AgentTaskToAdhocPlan", - } - err = at.QueryAgentTaskToAdhocPlan(). - Select(adhocplan.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. 
+func (n *Build) IsNode() {} -func (a *Ansible) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: a.ID, - Type: "Ansible", - Fields: make([]*Field, 9), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(a.Name); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(a.HclID); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(a.Description); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(a.Source); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "source", - Value: string(buf), - } - if buf, err = json.Marshal(a.PlaybookName); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "playbook_name", - Value: string(buf), - } - if buf, err = json.Marshal(a.Method); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "ansible.Method", - Name: "method", - Value: string(buf), - } - if buf, err = json.Marshal(a.Inventory); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "string", - Name: "inventory", - Value: string(buf), - } - if buf, err = json.Marshal(a.AbsPath); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "string", - Name: "abs_path", - Value: string(buf), - } - if buf, err = json.Marshal(a.Tags); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "User", - Name: "AnsibleToUser", - } - err = a.QueryAnsibleToUser(). - Select(user.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Environment", - Name: "AnsibleFromEnvironment", - } - err = a.QueryAnsibleFromEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. 
+func (n *BuildCommit) IsNode() {} -func (au *AuthUser) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: au.ID, - Type: "AuthUser", - Fields: make([]*Field, 11), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(au.Username); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "username", - Value: string(buf), - } - if buf, err = json.Marshal(au.Password); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "password", - Value: string(buf), - } - if buf, err = json.Marshal(au.FirstName); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "first_name", - Value: string(buf), - } - if buf, err = json.Marshal(au.LastName); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "last_name", - Value: string(buf), - } - if buf, err = json.Marshal(au.Email); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "email", - Value: string(buf), - } - if buf, err = json.Marshal(au.Phone); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "phone", - Value: string(buf), - } - if buf, err = json.Marshal(au.Company); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "string", - Name: "company", - Value: string(buf), - } - if buf, err = json.Marshal(au.Occupation); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "string", - Name: "occupation", - Value: string(buf), - } - if buf, err = json.Marshal(au.PrivateKeyPath); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "string", - Name: "private_key_path", - Value: string(buf), - } - if buf, err = json.Marshal(au.Role); err != nil { - return nil, err - } - node.Fields[9] = &Field{ - Type: "authuser.Role", - Name: "role", - Value: string(buf), - } - if buf, err = json.Marshal(au.Provider); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "authuser.Provider", - Name: "provider", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Token", - Name: "AuthUserToToken", - } - err = au.QueryAuthUserToToken(). - Select(token.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ServerTask", - Name: "AuthUserToServerTasks", - } - err = au.QueryAuthUserToServerTasks(). - Select(servertask.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. 
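From this point the patch deletes the old reflection-style Node(ctx) builders, which marshaled every field to JSON and issued one query per edge to assemble a generic *Node value, and replaces them with empty IsNode() markers; Noder becomes a plain marker interface and node lookups move to typed code generated elsewhere. A small sketch, assuming only the marker methods shown here, of how calling code can branch on the concrete types instead of walking the generic struct:

// Hypothetical helper, not part of this patch: with Noder reduced to a marker
// interface, consumers type-switch on the concrete ent types.
package example

import (
	"fmt"

	"github.com/gen0cide/laforge/ent"
)

func describeNoder(n ent.Noder) string {
	switch v := n.(type) {
	case *ent.Build:
		return fmt.Sprintf("Build (revision %d)", v.Revision)
	case *ent.Environment:
		return fmt.Sprintf("Environment %q", v.Name)
	case *ent.ProvisionedHost:
		return fmt.Sprintf("ProvisionedHost %s", v.SubnetIP)
	default:
		return fmt.Sprintf("unhandled node type %T", v)
	}
}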
+func (n *Command) IsNode() {} -func (b *Build) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: b.ID, - Type: "Build", - Fields: make([]*Field, 4), - Edges: make([]*Edge, 12), - } - var buf []byte - if buf, err = json.Marshal(b.Revision); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "int", - Name: "revision", - Value: string(buf), - } - if buf, err = json.Marshal(b.EnvironmentRevision); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "int", - Name: "environment_revision", - Value: string(buf), - } - if buf, err = json.Marshal(b.Vars); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(b.CompletedPlan); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "bool", - Name: "completed_plan", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Status", - Name: "BuildToStatus", - } - err = b.QueryBuildToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Environment", - Name: "BuildToEnvironment", - } - err = b.QueryBuildToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Competition", - Name: "BuildToCompetition", - } - err = b.QueryBuildToCompetition(). - Select(competition.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "BuildCommit", - Name: "BuildToLatestBuildCommit", - } - err = b.QueryBuildToLatestBuildCommit(). - Select(buildcommit.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "RepoCommit", - Name: "BuildToRepoCommit", - } - err = b.QueryBuildToRepoCommit(). - Select(repocommit.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "ProvisionedNetwork", - Name: "BuildToProvisionedNetwork", - } - err = b.QueryBuildToProvisionedNetwork(). - Select(provisionednetwork.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - node.Edges[6] = &Edge{ - Type: "Team", - Name: "BuildToTeam", - } - err = b.QueryBuildToTeam(). - Select(team.FieldID). - Scan(ctx, &node.Edges[6].IDs) - if err != nil { - return nil, err - } - node.Edges[7] = &Edge{ - Type: "Plan", - Name: "BuildToPlan", - } - err = b.QueryBuildToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[7].IDs) - if err != nil { - return nil, err - } - node.Edges[8] = &Edge{ - Type: "BuildCommit", - Name: "BuildToBuildCommits", - } - err = b.QueryBuildToBuildCommits(). - Select(buildcommit.FieldID). - Scan(ctx, &node.Edges[8].IDs) - if err != nil { - return nil, err - } - node.Edges[9] = &Edge{ - Type: "AdhocPlan", - Name: "BuildToAdhocPlans", - } - err = b.QueryBuildToAdhocPlans(). - Select(adhocplan.FieldID). - Scan(ctx, &node.Edges[9].IDs) - if err != nil { - return nil, err - } - node.Edges[10] = &Edge{ - Type: "AgentStatus", - Name: "BuildToAgentStatuses", - } - err = b.QueryBuildToAgentStatuses(). - Select(agentstatus.FieldID). - Scan(ctx, &node.Edges[10].IDs) - if err != nil { - return nil, err - } - node.Edges[11] = &Edge{ - Type: "ServerTask", - Name: "BuildToServerTasks", - } - err = b.QueryBuildToServerTasks(). - Select(servertask.FieldID). 
- Scan(ctx, &node.Edges[11].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Competition) IsNode() {} -func (bc *BuildCommit) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: bc.ID, - Type: "BuildCommit", - Fields: make([]*Field, 4), - Edges: make([]*Edge, 3), - } - var buf []byte - if buf, err = json.Marshal(bc.Type); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "buildcommit.Type", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(bc.Revision); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "int", - Name: "revision", - Value: string(buf), - } - if buf, err = json.Marshal(bc.State); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "buildcommit.State", - Name: "state", - Value: string(buf), - } - if buf, err = json.Marshal(bc.CreatedAt); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "time.Time", - Name: "created_at", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Build", - Name: "BuildCommitToBuild", - } - err = bc.QueryBuildCommitToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ServerTask", - Name: "BuildCommitToServerTask", - } - err = bc.QueryBuildCommitToServerTask(). - Select(servertask.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "PlanDiff", - Name: "BuildCommitToPlanDiffs", - } - err = bc.QueryBuildCommitToPlanDiffs(). - Select(plandiff.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (c *Command) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: c.ID, - Type: "Command", - Fields: make([]*Field, 11), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(c.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(c.Name); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(c.Description); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(c.Program); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "program", - Value: string(buf), - } - if buf, err = json.Marshal(c.Args); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "[]string", - Name: "args", - Value: string(buf), - } - if buf, err = json.Marshal(c.IgnoreErrors); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "bool", - Name: "ignore_errors", - Value: string(buf), - } - if buf, err = json.Marshal(c.Disabled); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "bool", - Name: "disabled", - Value: string(buf), - } - if buf, err = json.Marshal(c.Cooldown); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "int", - Name: "cooldown", - Value: string(buf), - } - if buf, err = json.Marshal(c.Timeout); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "int", - Name: "timeout", - Value: string(buf), - } - if buf, err = json.Marshal(c.Vars); err != nil { - return nil, err - } - node.Fields[9] = 
&Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(c.Tags); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "User", - Name: "CommandToUser", - } - err = c.QueryCommandToUser(). - Select(user.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Environment", - Name: "CommandToEnvironment", - } - err = c.QueryCommandToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (c *Competition) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: c.ID, - Type: "Competition", - Fields: make([]*Field, 4), - Edges: make([]*Edge, 3), - } - var buf []byte - if buf, err = json.Marshal(c.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(c.RootPassword); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "root_password", - Value: string(buf), - } - if buf, err = json.Marshal(c.Config); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "map[string]string", - Name: "config", - Value: string(buf), - } - if buf, err = json.Marshal(c.Tags); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "DNS", - Name: "CompetitionToDNS", - } - err = c.QueryCompetitionToDNS(). - Select(dns.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Environment", - Name: "CompetitionToEnvironment", - } - err = c.QueryCompetitionToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Build", - Name: "CompetitionToBuild", - } - err = c.QueryCompetitionToBuild(). - Select(build.FieldID). 
- Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (d *DNS) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: d.ID, - Type: "DNS", - Fields: make([]*Field, 6), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(d.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(d.Type); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(d.RootDomain); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "root_domain", - Value: string(buf), - } - if buf, err = json.Marshal(d.DNSServers); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "[]string", - Name: "dns_servers", - Value: string(buf), - } - if buf, err = json.Marshal(d.NtpServers); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "[]string", - Name: "ntp_servers", - Value: string(buf), - } - if buf, err = json.Marshal(d.Config); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "map[string]string", - Name: "config", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "DNSToEnvironment", - } - err = d.QueryDNSToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Competition", - Name: "DNSToCompetition", - } - err = d.QueryDNSToCompetition(). - Select(competition.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (dr *DNSRecord) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: dr.ID, - Type: "DNSRecord", - Fields: make([]*Field, 8), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(dr.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Name); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Values); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "[]string", - Name: "values", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Type); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Zone); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "zone", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Vars); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Disabled); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "bool", - Name: "disabled", - Value: string(buf), - } - if buf, err = json.Marshal(dr.Tags); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "DNSRecordToEnvironment", - } - err = dr.QueryDNSRecordToEnvironment(). - Select(environment.FieldID). 
- Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (d *Disk) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: d.ID, - Type: "Disk", - Fields: make([]*Field, 1), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(d.Size); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "int", - Name: "size", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Host", - Name: "DiskToHost", - } - err = d.QueryDiskToHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (e *Environment) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: e.ID, - Type: "Environment", - Fields: make([]*Field, 11), - Edges: make([]*Edge, 19), - } - var buf []byte - if buf, err = json.Marshal(e.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(e.CompetitionID); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "competition_id", - Value: string(buf), - } - if buf, err = json.Marshal(e.Name); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(e.Description); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(e.Builder); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "builder", - Value: string(buf), - } - if buf, err = json.Marshal(e.TeamCount); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "int", - Name: "team_count", - Value: string(buf), - } - if buf, err = json.Marshal(e.Revision); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "int", - Name: "revision", - Value: string(buf), - } - if buf, err = json.Marshal(e.AdminCidrs); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "[]string", - Name: "admin_cidrs", - Value: string(buf), - } - if buf, err = json.Marshal(e.ExposedVdiPorts); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "[]string", - Name: "exposed_vdi_ports", - Value: string(buf), - } - if buf, err = json.Marshal(e.Config); err != nil { - return nil, err - } - node.Fields[9] = &Field{ - Type: "map[string]string", - Name: "config", - Value: string(buf), - } - if buf, err = json.Marshal(e.Tags); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "User", - Name: "EnvironmentToUser", - } - err = e.QueryEnvironmentToUser(). - Select(user.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Host", - Name: "EnvironmentToHost", - } - err = e.QueryEnvironmentToHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Competition", - Name: "EnvironmentToCompetition", - } - err = e.QueryEnvironmentToCompetition(). - Select(competition.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Identity", - Name: "EnvironmentToIdentity", - } - err = e.QueryEnvironmentToIdentity(). - Select(identity.FieldID). 
- Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "Command", - Name: "EnvironmentToCommand", - } - err = e.QueryEnvironmentToCommand(). - Select(command.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "Script", - Name: "EnvironmentToScript", - } - err = e.QueryEnvironmentToScript(). - Select(script.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - node.Edges[6] = &Edge{ - Type: "FileDownload", - Name: "EnvironmentToFileDownload", - } - err = e.QueryEnvironmentToFileDownload(). - Select(filedownload.FieldID). - Scan(ctx, &node.Edges[6].IDs) - if err != nil { - return nil, err - } - node.Edges[7] = &Edge{ - Type: "FileDelete", - Name: "EnvironmentToFileDelete", - } - err = e.QueryEnvironmentToFileDelete(). - Select(filedelete.FieldID). - Scan(ctx, &node.Edges[7].IDs) - if err != nil { - return nil, err - } - node.Edges[8] = &Edge{ - Type: "FileExtract", - Name: "EnvironmentToFileExtract", - } - err = e.QueryEnvironmentToFileExtract(). - Select(fileextract.FieldID). - Scan(ctx, &node.Edges[8].IDs) - if err != nil { - return nil, err - } - node.Edges[9] = &Edge{ - Type: "IncludedNetwork", - Name: "EnvironmentToIncludedNetwork", - } - err = e.QueryEnvironmentToIncludedNetwork(). - Select(includednetwork.FieldID). - Scan(ctx, &node.Edges[9].IDs) - if err != nil { - return nil, err - } - node.Edges[10] = &Edge{ - Type: "Finding", - Name: "EnvironmentToFinding", - } - err = e.QueryEnvironmentToFinding(). - Select(finding.FieldID). - Scan(ctx, &node.Edges[10].IDs) - if err != nil { - return nil, err - } - node.Edges[11] = &Edge{ - Type: "DNSRecord", - Name: "EnvironmentToDNSRecord", - } - err = e.QueryEnvironmentToDNSRecord(). - Select(dnsrecord.FieldID). - Scan(ctx, &node.Edges[11].IDs) - if err != nil { - return nil, err - } - node.Edges[12] = &Edge{ - Type: "DNS", - Name: "EnvironmentToDNS", - } - err = e.QueryEnvironmentToDNS(). - Select(dns.FieldID). - Scan(ctx, &node.Edges[12].IDs) - if err != nil { - return nil, err - } - node.Edges[13] = &Edge{ - Type: "Network", - Name: "EnvironmentToNetwork", - } - err = e.QueryEnvironmentToNetwork(). - Select(network.FieldID). - Scan(ctx, &node.Edges[13].IDs) - if err != nil { - return nil, err - } - node.Edges[14] = &Edge{ - Type: "HostDependency", - Name: "EnvironmentToHostDependency", - } - err = e.QueryEnvironmentToHostDependency(). - Select(hostdependency.FieldID). - Scan(ctx, &node.Edges[14].IDs) - if err != nil { - return nil, err - } - node.Edges[15] = &Edge{ - Type: "Ansible", - Name: "EnvironmentToAnsible", - } - err = e.QueryEnvironmentToAnsible(). - Select(ansible.FieldID). - Scan(ctx, &node.Edges[15].IDs) - if err != nil { - return nil, err - } - node.Edges[16] = &Edge{ - Type: "Build", - Name: "EnvironmentToBuild", - } - err = e.QueryEnvironmentToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[16].IDs) - if err != nil { - return nil, err - } - node.Edges[17] = &Edge{ - Type: "Repository", - Name: "EnvironmentToRepository", - } - err = e.QueryEnvironmentToRepository(). - Select(repository.FieldID). - Scan(ctx, &node.Edges[17].IDs) - if err != nil { - return nil, err - } - node.Edges[18] = &Edge{ - Type: "ServerTask", - Name: "EnvironmentToServerTask", - } - err = e.QueryEnvironmentToServerTask(). - Select(servertask.FieldID). 
- Scan(ctx, &node.Edges[18].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (fd *FileDelete) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: fd.ID, - Type: "FileDelete", - Fields: make([]*Field, 3), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(fd.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Path); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "path", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Tags); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "FileDeleteToEnvironment", - } - err = fd.QueryFileDeleteToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (fd *FileDownload) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: fd.ID, - Type: "FileDownload", - Fields: make([]*Field, 11), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(fd.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(fd.SourceType); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "source_type", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Source); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "source", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Destination); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "destination", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Template); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "bool", - Name: "template", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Perms); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "perms", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Disabled); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "bool", - Name: "disabled", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Md5); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "string", - Name: "md5", - Value: string(buf), - } - if buf, err = json.Marshal(fd.AbsPath); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "string", - Name: "abs_path", - Value: string(buf), - } - if buf, err = json.Marshal(fd.IsTxt); err != nil { - return nil, err - } - node.Fields[9] = &Field{ - Type: "bool", - Name: "is_txt", - Value: string(buf), - } - if buf, err = json.Marshal(fd.Tags); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "FileDownloadToEnvironment", - } - err = fd.QueryFileDownloadToEnvironment(). - Select(environment.FieldID). 
- Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (fe *FileExtract) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: fe.ID, - Type: "FileExtract", - Fields: make([]*Field, 5), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(fe.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(fe.Source); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "source", - Value: string(buf), - } - if buf, err = json.Marshal(fe.Destination); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "destination", - Value: string(buf), - } - if buf, err = json.Marshal(fe.Type); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(fe.Tags); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "FileExtractToEnvironment", - } - err = fe.QueryFileExtractToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (f *Finding) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: f.ID, - Type: "Finding", - Fields: make([]*Field, 5), - Edges: make([]*Edge, 4), - } - var buf []byte - if buf, err = json.Marshal(f.Name); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(f.Description); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(f.Severity); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "finding.Severity", - Name: "severity", - Value: string(buf), - } - if buf, err = json.Marshal(f.Difficulty); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "finding.Difficulty", - Name: "difficulty", - Value: string(buf), - } - if buf, err = json.Marshal(f.Tags); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "User", - Name: "FindingToUser", - } - err = f.QueryFindingToUser(). - Select(user.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Host", - Name: "FindingToHost", - } - err = f.QueryFindingToHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Script", - Name: "FindingToScript", - } - err = f.QueryFindingToScript(). - Select(script.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Environment", - Name: "FindingToEnvironment", - } - err = f.QueryFindingToEnvironment(). - Select(environment.FieldID). 
- Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (gfm *GinFileMiddleware) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: gfm.ID, - Type: "GinFileMiddleware", - Fields: make([]*Field, 3), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(gfm.URLID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "url_id", - Value: string(buf), - } - if buf, err = json.Marshal(gfm.FilePath); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "file_path", - Value: string(buf), - } - if buf, err = json.Marshal(gfm.Accessed); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "bool", - Name: "accessed", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "ProvisionedHost", - Name: "GinFileMiddlewareToProvisionedHost", - } - err = gfm.QueryGinFileMiddlewareToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ProvisioningStep", - Name: "GinFileMiddlewareToProvisioningStep", - } - err = gfm.QueryGinFileMiddlewareToProvisioningStep(). - Select(provisioningstep.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (h *Host) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: h.ID, - Type: "Host", - Fields: make([]*Field, 14), - Edges: make([]*Edge, 6), - } - var buf []byte - if buf, err = json.Marshal(h.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(h.Hostname); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "hostname", - Value: string(buf), - } - if buf, err = json.Marshal(h.Description); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(h.OS); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "OS", - Value: string(buf), - } - if buf, err = json.Marshal(h.LastOctet); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "int", - Name: "last_octet", - Value: string(buf), - } - if buf, err = json.Marshal(h.InstanceSize); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "instance_size", - Value: string(buf), - } - if buf, err = json.Marshal(h.AllowMACChanges); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "bool", - Name: "allow_mac_changes", - Value: string(buf), - } - if buf, err = json.Marshal(h.ExposedTCPPorts); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "[]string", - Name: "exposed_tcp_ports", - Value: string(buf), - } - if buf, err = json.Marshal(h.ExposedUDPPorts); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "[]string", - Name: "exposed_udp_ports", - Value: string(buf), - } - if buf, err = json.Marshal(h.OverridePassword); err != nil { - return nil, err - } - node.Fields[9] = &Field{ - Type: "string", - Name: "override_password", - Value: string(buf), - } - if buf, err = json.Marshal(h.Vars); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(h.UserGroups); err != nil { - return nil, 
err - } - node.Fields[11] = &Field{ - Type: "[]string", - Name: "user_groups", - Value: string(buf), - } - if buf, err = json.Marshal(h.ProvisionSteps); err != nil { - return nil, err - } - node.Fields[12] = &Field{ - Type: "[]string", - Name: "provision_steps", - Value: string(buf), - } - if buf, err = json.Marshal(h.Tags); err != nil { - return nil, err - } - node.Fields[13] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Disk", - Name: "HostToDisk", - } - err = h.QueryHostToDisk(). - Select(disk.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "User", - Name: "HostToUser", - } - err = h.QueryHostToUser(). - Select(user.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Environment", - Name: "HostToEnvironment", - } - err = h.QueryHostToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "IncludedNetwork", - Name: "HostToIncludedNetwork", - } - err = h.QueryHostToIncludedNetwork(). - Select(includednetwork.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "HostDependency", - Name: "DependOnHostToHostDependency", - } - err = h.QueryDependOnHostToHostDependency(). - Select(hostdependency.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "HostDependency", - Name: "DependByHostToHostDependency", - } - err = h.QueryDependByHostToHostDependency(). - Select(hostdependency.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (hd *HostDependency) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: hd.ID, - Type: "HostDependency", - Fields: make([]*Field, 2), - Edges: make([]*Edge, 4), - } - var buf []byte - if buf, err = json.Marshal(hd.HostID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "host_id", - Value: string(buf), - } - if buf, err = json.Marshal(hd.NetworkID); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "network_id", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Host", - Name: "HostDependencyToDependOnHost", - } - err = hd.QueryHostDependencyToDependOnHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Host", - Name: "HostDependencyToDependByHost", - } - err = hd.QueryHostDependencyToDependByHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Network", - Name: "HostDependencyToNetwork", - } - err = hd.QueryHostDependencyToNetwork(). - Select(network.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Environment", - Name: "HostDependencyToEnvironment", - } - err = hd.QueryHostDependencyToEnvironment(). - Select(environment.FieldID). 
- Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (i *Identity) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: i.ID, - Type: "Identity", - Fields: make([]*Field, 9), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(i.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(i.FirstName); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "first_name", - Value: string(buf), - } - if buf, err = json.Marshal(i.LastName); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "last_name", - Value: string(buf), - } - if buf, err = json.Marshal(i.Email); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "email", - Value: string(buf), - } - if buf, err = json.Marshal(i.Password); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "password", - Value: string(buf), - } - if buf, err = json.Marshal(i.Description); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(i.AvatarFile); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "string", - Name: "avatar_file", - Value: string(buf), - } - if buf, err = json.Marshal(i.Vars); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(i.Tags); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "IdentityToEnvironment", - } - err = i.QueryIdentityToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (in *IncludedNetwork) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: in.ID, - Type: "IncludedNetwork", - Fields: make([]*Field, 2), - Edges: make([]*Edge, 4), - } - var buf []byte - if buf, err = json.Marshal(in.Name); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(in.Hosts); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "[]string", - Name: "hosts", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Tag", - Name: "IncludedNetworkToTag", - } - err = in.QueryIncludedNetworkToTag(). - Select(tag.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Host", - Name: "IncludedNetworkToHost", - } - err = in.QueryIncludedNetworkToHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Network", - Name: "IncludedNetworkToNetwork", - } - err = in.QueryIncludedNetworkToNetwork(). - Select(network.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Environment", - Name: "IncludedNetworkToEnvironment", - } - err = in.QueryIncludedNetworkToEnvironment(). - Select(environment.FieldID). 
- Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - return node, nil -} - -func (n *Network) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: n.ID, - Type: "Network", - Fields: make([]*Field, 6), - Edges: make([]*Edge, 3), - } - var buf []byte - if buf, err = json.Marshal(n.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(n.Name); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(n.Cidr); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "cidr", - Value: string(buf), - } - if buf, err = json.Marshal(n.VdiVisible); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "bool", - Name: "vdi_visible", - Value: string(buf), - } - if buf, err = json.Marshal(n.Vars); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(n.Tags); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "NetworkToEnvironment", - } - err = n.QueryNetworkToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "HostDependency", - Name: "NetworkToHostDependency", - } - err = n.QueryNetworkToHostDependency(). - Select(hostdependency.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "IncludedNetwork", - Name: "NetworkToIncludedNetwork", - } - err = n.QueryNetworkToIncludedNetwork(). - Select(includednetwork.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *DNS) IsNode() {} -func (pl *Plan) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: pl.ID, - Type: "Plan", - Fields: make([]*Field, 3), - Edges: make([]*Edge, 9), - } - var buf []byte - if buf, err = json.Marshal(pl.StepNumber); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "int", - Name: "step_number", - Value: string(buf), - } - if buf, err = json.Marshal(pl.Type); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "plan.Type", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(pl.BuildID); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "build_id", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Plan", - Name: "PrevPlan", - } - err = pl.QueryPrevPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Plan", - Name: "NextPlan", - } - err = pl.QueryNextPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Build", - Name: "PlanToBuild", - } - err = pl.QueryPlanToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Team", - Name: "PlanToTeam", - } - err = pl.QueryPlanToTeam(). - Select(team.FieldID). 
- Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "ProvisionedNetwork", - Name: "PlanToProvisionedNetwork", - } - err = pl.QueryPlanToProvisionedNetwork(). - Select(provisionednetwork.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "ProvisionedHost", - Name: "PlanToProvisionedHost", - } - err = pl.QueryPlanToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - node.Edges[6] = &Edge{ - Type: "ProvisioningStep", - Name: "PlanToProvisioningStep", - } - err = pl.QueryPlanToProvisioningStep(). - Select(provisioningstep.FieldID). - Scan(ctx, &node.Edges[6].IDs) - if err != nil { - return nil, err - } - node.Edges[7] = &Edge{ - Type: "Status", - Name: "PlanToStatus", - } - err = pl.QueryPlanToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[7].IDs) - if err != nil { - return nil, err - } - node.Edges[8] = &Edge{ - Type: "PlanDiff", - Name: "PlanToPlanDiffs", - } - err = pl.QueryPlanToPlanDiffs(). - Select(plandiff.FieldID). - Scan(ctx, &node.Edges[8].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *DNSRecord) IsNode() {} -func (pd *PlanDiff) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: pd.ID, - Type: "PlanDiff", - Fields: make([]*Field, 2), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(pd.Revision); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "int", - Name: "revision", - Value: string(buf), - } - if buf, err = json.Marshal(pd.NewState); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "plandiff.NewState", - Name: "new_state", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "BuildCommit", - Name: "PlanDiffToBuildCommit", - } - err = pd.QueryPlanDiffToBuildCommit(). - Select(buildcommit.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Plan", - Name: "PlanDiffToPlan", - } - err = pd.QueryPlanDiffToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Disk) IsNode() {} -func (ph *ProvisionedHost) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: ph.ID, - Type: "ProvisionedHost", - Fields: make([]*Field, 3), - Edges: make([]*Edge, 10), - } - var buf []byte - if buf, err = json.Marshal(ph.SubnetIP); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "subnet_ip", - Value: string(buf), - } - if buf, err = json.Marshal(ph.AddonType); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "provisionedhost.AddonType", - Name: "addon_type", - Value: string(buf), - } - if buf, err = json.Marshal(ph.Vars); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Status", - Name: "ProvisionedHostToStatus", - } - err = ph.QueryProvisionedHostToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ProvisionedNetwork", - Name: "ProvisionedHostToProvisionedNetwork", - } - err = ph.QueryProvisionedHostToProvisionedNetwork(). 
- Select(provisionednetwork.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Host", - Name: "ProvisionedHostToHost", - } - err = ph.QueryProvisionedHostToHost(). - Select(host.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Plan", - Name: "ProvisionedHostToEndStepPlan", - } - err = ph.QueryProvisionedHostToEndStepPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "Build", - Name: "ProvisionedHostToBuild", - } - err = ph.QueryProvisionedHostToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "ProvisioningStep", - Name: "ProvisionedHostToProvisioningStep", - } - err = ph.QueryProvisionedHostToProvisioningStep(). - Select(provisioningstep.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - node.Edges[6] = &Edge{ - Type: "AgentStatus", - Name: "ProvisionedHostToAgentStatus", - } - err = ph.QueryProvisionedHostToAgentStatus(). - Select(agentstatus.FieldID). - Scan(ctx, &node.Edges[6].IDs) - if err != nil { - return nil, err - } - node.Edges[7] = &Edge{ - Type: "AgentTask", - Name: "ProvisionedHostToAgentTask", - } - err = ph.QueryProvisionedHostToAgentTask(). - Select(agenttask.FieldID). - Scan(ctx, &node.Edges[7].IDs) - if err != nil { - return nil, err - } - node.Edges[8] = &Edge{ - Type: "Plan", - Name: "ProvisionedHostToPlan", - } - err = ph.QueryProvisionedHostToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[8].IDs) - if err != nil { - return nil, err - } - node.Edges[9] = &Edge{ - Type: "GinFileMiddleware", - Name: "ProvisionedHostToGinFileMiddleware", - } - err = ph.QueryProvisionedHostToGinFileMiddleware(). - Select(ginfilemiddleware.FieldID). - Scan(ctx, &node.Edges[9].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Environment) IsNode() {} -func (pn *ProvisionedNetwork) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: pn.ID, - Type: "ProvisionedNetwork", - Fields: make([]*Field, 3), - Edges: make([]*Edge, 6), - } - var buf []byte - if buf, err = json.Marshal(pn.Name); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(pn.Cidr); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "cidr", - Value: string(buf), - } - if buf, err = json.Marshal(pn.Vars); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Status", - Name: "ProvisionedNetworkToStatus", - } - err = pn.QueryProvisionedNetworkToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Network", - Name: "ProvisionedNetworkToNetwork", - } - err = pn.QueryProvisionedNetworkToNetwork(). - Select(network.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Build", - Name: "ProvisionedNetworkToBuild", - } - err = pn.QueryProvisionedNetworkToBuild(). - Select(build.FieldID). 
- Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Team", - Name: "ProvisionedNetworkToTeam", - } - err = pn.QueryProvisionedNetworkToTeam(). - Select(team.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "ProvisionedHost", - Name: "ProvisionedNetworkToProvisionedHost", - } - err = pn.QueryProvisionedNetworkToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "Plan", - Name: "ProvisionedNetworkToPlan", - } - err = pn.QueryProvisionedNetworkToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *FileDelete) IsNode() {} -func (ps *ProvisioningStep) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: ps.ID, - Type: "ProvisioningStep", - Fields: make([]*Field, 2), - Edges: make([]*Edge, 12), - } - var buf []byte - if buf, err = json.Marshal(ps.Type); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "provisioningstep.Type", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(ps.StepNumber); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "int", - Name: "step_number", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Status", - Name: "ProvisioningStepToStatus", - } - err = ps.QueryProvisioningStepToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ProvisionedHost", - Name: "ProvisioningStepToProvisionedHost", - } - err = ps.QueryProvisioningStepToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Script", - Name: "ProvisioningStepToScript", - } - err = ps.QueryProvisioningStepToScript(). - Select(script.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Command", - Name: "ProvisioningStepToCommand", - } - err = ps.QueryProvisioningStepToCommand(). - Select(command.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "DNSRecord", - Name: "ProvisioningStepToDNSRecord", - } - err = ps.QueryProvisioningStepToDNSRecord(). - Select(dnsrecord.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "FileDelete", - Name: "ProvisioningStepToFileDelete", - } - err = ps.QueryProvisioningStepToFileDelete(). - Select(filedelete.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - node.Edges[6] = &Edge{ - Type: "FileDownload", - Name: "ProvisioningStepToFileDownload", - } - err = ps.QueryProvisioningStepToFileDownload(). - Select(filedownload.FieldID). - Scan(ctx, &node.Edges[6].IDs) - if err != nil { - return nil, err - } - node.Edges[7] = &Edge{ - Type: "FileExtract", - Name: "ProvisioningStepToFileExtract", - } - err = ps.QueryProvisioningStepToFileExtract(). - Select(fileextract.FieldID). - Scan(ctx, &node.Edges[7].IDs) - if err != nil { - return nil, err - } - node.Edges[8] = &Edge{ - Type: "Ansible", - Name: "ProvisioningStepToAnsible", - } - err = ps.QueryProvisioningStepToAnsible(). - Select(ansible.FieldID). 
- Scan(ctx, &node.Edges[8].IDs) - if err != nil { - return nil, err - } - node.Edges[9] = &Edge{ - Type: "Plan", - Name: "ProvisioningStepToPlan", - } - err = ps.QueryProvisioningStepToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[9].IDs) - if err != nil { - return nil, err - } - node.Edges[10] = &Edge{ - Type: "AgentTask", - Name: "ProvisioningStepToAgentTask", - } - err = ps.QueryProvisioningStepToAgentTask(). - Select(agenttask.FieldID). - Scan(ctx, &node.Edges[10].IDs) - if err != nil { - return nil, err - } - node.Edges[11] = &Edge{ - Type: "GinFileMiddleware", - Name: "ProvisioningStepToGinFileMiddleware", - } - err = ps.QueryProvisioningStepToGinFileMiddleware(). - Select(ginfilemiddleware.FieldID). - Scan(ctx, &node.Edges[11].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *FileDownload) IsNode() {} -func (rc *RepoCommit) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: rc.ID, - Type: "RepoCommit", - Fields: make([]*Field, 8), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(rc.Revision); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "int", - Name: "revision", - Value: string(buf), - } - if buf, err = json.Marshal(rc.Hash); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "hash", - Value: string(buf), - } - if buf, err = json.Marshal(rc.Author); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "object.Signature", - Name: "author", - Value: string(buf), - } - if buf, err = json.Marshal(rc.Committer); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "object.Signature", - Name: "committer", - Value: string(buf), - } - if buf, err = json.Marshal(rc.PgpSignature); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "pgp_signature", - Value: string(buf), - } - if buf, err = json.Marshal(rc.Message); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "message", - Value: string(buf), - } - if buf, err = json.Marshal(rc.TreeHash); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "string", - Name: "tree_hash", - Value: string(buf), - } - if buf, err = json.Marshal(rc.ParentHashes); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "[]string", - Name: "parent_hashes", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Repository", - Name: "RepoCommitToRepository", - } - err = rc.QueryRepoCommitToRepository(). - Select(repository.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. 
+func (n *FileExtract) IsNode() {} -func (r *Repository) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: r.ID, - Type: "Repository", - Fields: make([]*Field, 4), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(r.RepoURL); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "repo_url", - Value: string(buf), - } - if buf, err = json.Marshal(r.BranchName); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "branch_name", - Value: string(buf), - } - if buf, err = json.Marshal(r.EnviromentFilepath); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "enviroment_filepath", - Value: string(buf), - } - if buf, err = json.Marshal(r.FolderPath); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "folder_path", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Environment", - Name: "RepositoryToEnvironment", - } - err = r.QueryRepositoryToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "RepoCommit", - Name: "RepositoryToRepoCommit", - } - err = r.QueryRepositoryToRepoCommit(). - Select(repocommit.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Finding) IsNode() {} -func (s *Script) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: s.ID, - Type: "Script", - Fields: make([]*Field, 14), - Edges: make([]*Edge, 3), - } - var buf []byte - if buf, err = json.Marshal(s.HclID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - if buf, err = json.Marshal(s.Name); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(s.Language); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "language", - Value: string(buf), - } - if buf, err = json.Marshal(s.Description); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "description", - Value: string(buf), - } - if buf, err = json.Marshal(s.Source); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "source", - Value: string(buf), - } - if buf, err = json.Marshal(s.SourceType); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "string", - Name: "source_type", - Value: string(buf), - } - if buf, err = json.Marshal(s.Cooldown); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "int", - Name: "cooldown", - Value: string(buf), - } - if buf, err = json.Marshal(s.Timeout); err != nil { - return nil, err - } - node.Fields[7] = &Field{ - Type: "int", - Name: "timeout", - Value: string(buf), - } - if buf, err = json.Marshal(s.IgnoreErrors); err != nil { - return nil, err - } - node.Fields[8] = &Field{ - Type: "bool", - Name: "ignore_errors", - Value: string(buf), - } - if buf, err = json.Marshal(s.Args); err != nil { - return nil, err - } - node.Fields[9] = &Field{ - Type: "[]string", - Name: "args", - Value: string(buf), - } - if buf, err = json.Marshal(s.Disabled); err != nil { - return nil, err - } - node.Fields[10] = &Field{ - Type: "bool", - Name: "disabled", - Value: string(buf), - } - if buf, err = 
json.Marshal(s.Vars); err != nil { - return nil, err - } - node.Fields[11] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - if buf, err = json.Marshal(s.AbsPath); err != nil { - return nil, err - } - node.Fields[12] = &Field{ - Type: "string", - Name: "abs_path", - Value: string(buf), - } - if buf, err = json.Marshal(s.Tags); err != nil { - return nil, err - } - node.Fields[13] = &Field{ - Type: "map[string]string", - Name: "tags", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "User", - Name: "ScriptToUser", - } - err = s.QueryScriptToUser(). - Select(user.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Finding", - Name: "ScriptToFinding", - } - err = s.QueryScriptToFinding(). - Select(finding.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Environment", - Name: "ScriptToEnvironment", - } - err = s.QueryScriptToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *GinFileMiddleware) IsNode() {} -func (st *ServerTask) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: st.ID, - Type: "ServerTask", - Fields: make([]*Field, 5), - Edges: make([]*Edge, 6), - } - var buf []byte - if buf, err = json.Marshal(st.Type); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "servertask.Type", - Name: "type", - Value: string(buf), - } - if buf, err = json.Marshal(st.StartTime); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "time.Time", - Name: "start_time", - Value: string(buf), - } - if buf, err = json.Marshal(st.EndTime); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "time.Time", - Name: "end_time", - Value: string(buf), - } - if buf, err = json.Marshal(st.Errors); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "[]string", - Name: "errors", - Value: string(buf), - } - if buf, err = json.Marshal(st.LogFilePath); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "string", - Name: "log_file_path", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "AuthUser", - Name: "ServerTaskToAuthUser", - } - err = st.QueryServerTaskToAuthUser(). - Select(authuser.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Status", - Name: "ServerTaskToStatus", - } - err = st.QueryServerTaskToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "Environment", - Name: "ServerTaskToEnvironment", - } - err = st.QueryServerTaskToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Build", - Name: "ServerTaskToBuild", - } - err = st.QueryServerTaskToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "BuildCommit", - Name: "ServerTaskToBuildCommit", - } - err = st.QueryServerTaskToBuildCommit(). - Select(buildcommit.FieldID). 
- Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "GinFileMiddleware", - Name: "ServerTaskToGinFileMiddleware", - } - err = st.QueryServerTaskToGinFileMiddleware(). - Select(ginfilemiddleware.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Host) IsNode() {} -func (s *Status) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: s.ID, - Type: "Status", - Fields: make([]*Field, 7), - Edges: make([]*Edge, 8), - } - var buf []byte - if buf, err = json.Marshal(s.State); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "status.State", - Name: "state", - Value: string(buf), - } - if buf, err = json.Marshal(s.StatusFor); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "status.StatusFor", - Name: "status_for", - Value: string(buf), - } - if buf, err = json.Marshal(s.StartedAt); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "time.Time", - Name: "started_at", - Value: string(buf), - } - if buf, err = json.Marshal(s.EndedAt); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "time.Time", - Name: "ended_at", - Value: string(buf), - } - if buf, err = json.Marshal(s.Failed); err != nil { - return nil, err - } - node.Fields[4] = &Field{ - Type: "bool", - Name: "failed", - Value: string(buf), - } - if buf, err = json.Marshal(s.Completed); err != nil { - return nil, err - } - node.Fields[5] = &Field{ - Type: "bool", - Name: "completed", - Value: string(buf), - } - if buf, err = json.Marshal(s.Error); err != nil { - return nil, err - } - node.Fields[6] = &Field{ - Type: "string", - Name: "error", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Build", - Name: "StatusToBuild", - } - err = s.QueryStatusToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "ProvisionedNetwork", - Name: "StatusToProvisionedNetwork", - } - err = s.QueryStatusToProvisionedNetwork(). - Select(provisionednetwork.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "ProvisionedHost", - Name: "StatusToProvisionedHost", - } - err = s.QueryStatusToProvisionedHost(). - Select(provisionedhost.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "ProvisioningStep", - Name: "StatusToProvisioningStep", - } - err = s.QueryStatusToProvisioningStep(). - Select(provisioningstep.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - node.Edges[4] = &Edge{ - Type: "Team", - Name: "StatusToTeam", - } - err = s.QueryStatusToTeam(). - Select(team.FieldID). - Scan(ctx, &node.Edges[4].IDs) - if err != nil { - return nil, err - } - node.Edges[5] = &Edge{ - Type: "Plan", - Name: "StatusToPlan", - } - err = s.QueryStatusToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[5].IDs) - if err != nil { - return nil, err - } - node.Edges[6] = &Edge{ - Type: "ServerTask", - Name: "StatusToServerTask", - } - err = s.QueryStatusToServerTask(). - Select(servertask.FieldID). - Scan(ctx, &node.Edges[6].IDs) - if err != nil { - return nil, err - } - node.Edges[7] = &Edge{ - Type: "AdhocPlan", - Name: "StatusToAdhocPlan", - } - err = s.QueryStatusToAdhocPlan(). - Select(adhocplan.FieldID). 
- Scan(ctx, &node.Edges[7].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *HostDependency) IsNode() {} -func (t *Tag) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: t.ID, - Type: "Tag", - Fields: make([]*Field, 3), - Edges: make([]*Edge, 0), - } - var buf []byte - if buf, err = json.Marshal(t.UUID); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "uuid.UUID", - Name: "uuid", - Value: string(buf), - } - if buf, err = json.Marshal(t.Name); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(t.Description); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "map[string]string", - Name: "description", - Value: string(buf), - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Identity) IsNode() {} -func (t *Team) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: t.ID, - Type: "Team", - Fields: make([]*Field, 2), - Edges: make([]*Edge, 4), - } - var buf []byte - if buf, err = json.Marshal(t.TeamNumber); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "int", - Name: "team_number", - Value: string(buf), - } - if buf, err = json.Marshal(t.Vars); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "map[string]string", - Name: "vars", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Build", - Name: "TeamToBuild", - } - err = t.QueryTeamToBuild(). - Select(build.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Status", - Name: "TeamToStatus", - } - err = t.QueryTeamToStatus(). - Select(status.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - node.Edges[2] = &Edge{ - Type: "ProvisionedNetwork", - Name: "TeamToProvisionedNetwork", - } - err = t.QueryTeamToProvisionedNetwork(). - Select(provisionednetwork.FieldID). - Scan(ctx, &node.Edges[2].IDs) - if err != nil { - return nil, err - } - node.Edges[3] = &Edge{ - Type: "Plan", - Name: "TeamToPlan", - } - err = t.QueryTeamToPlan(). - Select(plan.FieldID). - Scan(ctx, &node.Edges[3].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *IncludedNetwork) IsNode() {} -func (t *Token) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: t.ID, - Type: "Token", - Fields: make([]*Field, 2), - Edges: make([]*Edge, 1), - } - var buf []byte - if buf, err = json.Marshal(t.Token); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "token", - Value: string(buf), - } - if buf, err = json.Marshal(t.ExpireAt); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "int64", - Name: "expire_at", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "AuthUser", - Name: "TokenToAuthUser", - } - err = t.QueryTokenToAuthUser(). - Select(authuser.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *Network) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *Plan) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. 
+func (n *PlanDiff) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *ProvisionedHost) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *ProvisionedNetwork) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *ProvisioningStep) IsNode() {} -func (u *User) Node(ctx context.Context) (node *Node, err error) { - node = &Node{ - ID: u.ID, - Type: "User", - Fields: make([]*Field, 4), - Edges: make([]*Edge, 2), - } - var buf []byte - if buf, err = json.Marshal(u.Name); err != nil { - return nil, err - } - node.Fields[0] = &Field{ - Type: "string", - Name: "name", - Value: string(buf), - } - if buf, err = json.Marshal(u.UUID); err != nil { - return nil, err - } - node.Fields[1] = &Field{ - Type: "string", - Name: "uuid", - Value: string(buf), - } - if buf, err = json.Marshal(u.Email); err != nil { - return nil, err - } - node.Fields[2] = &Field{ - Type: "string", - Name: "email", - Value: string(buf), - } - if buf, err = json.Marshal(u.HclID); err != nil { - return nil, err - } - node.Fields[3] = &Field{ - Type: "string", - Name: "hcl_id", - Value: string(buf), - } - node.Edges[0] = &Edge{ - Type: "Tag", - Name: "UserToTag", - } - err = u.QueryUserToTag(). - Select(tag.FieldID). - Scan(ctx, &node.Edges[0].IDs) - if err != nil { - return nil, err - } - node.Edges[1] = &Edge{ - Type: "Environment", - Name: "UserToEnvironment", - } - err = u.QueryUserToEnvironment(). - Select(environment.FieldID). - Scan(ctx, &node.Edges[1].IDs) - if err != nil { - return nil, err - } - return node, nil -} +// IsNode implements the Node interface check for GQLGen. +func (n *RepoCommit) IsNode() {} -func (c *Client) Node(ctx context.Context, id uuid.UUID) (*Node, error) { - n, err := c.Noder(ctx, id) - if err != nil { - return nil, err - } - return n.Node(ctx) -} +// IsNode implements the Node interface check for GQLGen. +func (n *Repository) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *Script) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *ServerTask) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *Status) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *Tag) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *Team) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *Token) IsNode() {} + +// IsNode implements the Node interface check for GQLGen. +func (n *User) IsNode() {} var errNodeInvalidID = &NotFoundError{"node"} @@ -3730,9 +206,8 @@ func (c *Client) newNodeOpts(opts []NodeOption) *nodeOptions { // Noder returns a Node by its id. If the NodeType was not provided, it will // be derived from the id value according to the universal-id configuration. // -// c.Noder(ctx, id) -// c.Noder(ctx, id, ent.WithNodeType(pet.Table)) -// +// c.Noder(ctx, id) +// c.Noder(ctx, id, ent.WithNodeType(typeResolver)) func (c *Client) Noder(ctx context.Context, id uuid.UUID, opts ...NodeOption) (_ Noder, err error) { defer func() { if IsNotFound(err) { @@ -3749,334 +224,445 @@ func (c *Client) Noder(ctx context.Context, id uuid.UUID, opts ...NodeOption) (_ func (c *Client) noder(ctx context.Context, table string, id uuid.UUID) (Noder, error) { switch table { case adhocplan.Table: - n, err := c.AdhocPlan.Query(). - Where(adhocplan.ID(id)). - CollectFields(ctx, "AdhocPlan"). - Only(ctx) + query := c.AdhocPlan.Query(). 
+ Where(adhocplan.ID(id)) + query, err := query.CollectFields(ctx, "AdhocPlan") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case agentstatus.Table: - n, err := c.AgentStatus.Query(). - Where(agentstatus.ID(id)). - CollectFields(ctx, "AgentStatus"). - Only(ctx) + query := c.AgentStatus.Query(). + Where(agentstatus.ID(id)) + query, err := query.CollectFields(ctx, "AgentStatus") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case agenttask.Table: - n, err := c.AgentTask.Query(). - Where(agenttask.ID(id)). - CollectFields(ctx, "AgentTask"). - Only(ctx) + query := c.AgentTask.Query(). + Where(agenttask.ID(id)) + query, err := query.CollectFields(ctx, "AgentTask") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case ansible.Table: - n, err := c.Ansible.Query(). - Where(ansible.ID(id)). - CollectFields(ctx, "Ansible"). - Only(ctx) + query := c.Ansible.Query(). + Where(ansible.ID(id)) + query, err := query.CollectFields(ctx, "Ansible") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case authuser.Table: - n, err := c.AuthUser.Query(). - Where(authuser.ID(id)). - CollectFields(ctx, "AuthUser"). - Only(ctx) + query := c.AuthUser.Query(). + Where(authuser.ID(id)) + query, err := query.CollectFields(ctx, "AuthUser") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case build.Table: - n, err := c.Build.Query(). - Where(build.ID(id)). - CollectFields(ctx, "Build"). - Only(ctx) + query := c.Build.Query(). + Where(build.ID(id)) + query, err := query.CollectFields(ctx, "Build") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case buildcommit.Table: - n, err := c.BuildCommit.Query(). - Where(buildcommit.ID(id)). - CollectFields(ctx, "BuildCommit"). - Only(ctx) + query := c.BuildCommit.Query(). + Where(buildcommit.ID(id)) + query, err := query.CollectFields(ctx, "BuildCommit") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case command.Table: - n, err := c.Command.Query(). - Where(command.ID(id)). - CollectFields(ctx, "Command"). - Only(ctx) + query := c.Command.Query(). + Where(command.ID(id)) + query, err := query.CollectFields(ctx, "Command") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case competition.Table: - n, err := c.Competition.Query(). - Where(competition.ID(id)). - CollectFields(ctx, "Competition"). - Only(ctx) + query := c.Competition.Query(). + Where(competition.ID(id)) + query, err := query.CollectFields(ctx, "Competition") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case dns.Table: - n, err := c.DNS.Query(). - Where(dns.ID(id)). - CollectFields(ctx, "DNS"). - Only(ctx) + query := c.DNS.Query(). + Where(dns.ID(id)) + query, err := query.CollectFields(ctx, "DNS") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case dnsrecord.Table: - n, err := c.DNSRecord.Query(). - Where(dnsrecord.ID(id)). - CollectFields(ctx, "DNSRecord"). - Only(ctx) + query := c.DNSRecord.Query(). 
+ Where(dnsrecord.ID(id)) + query, err := query.CollectFields(ctx, "DNSRecord") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case disk.Table: - n, err := c.Disk.Query(). - Where(disk.ID(id)). - CollectFields(ctx, "Disk"). - Only(ctx) + query := c.Disk.Query(). + Where(disk.ID(id)) + query, err := query.CollectFields(ctx, "Disk") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case environment.Table: - n, err := c.Environment.Query(). - Where(environment.ID(id)). - CollectFields(ctx, "Environment"). - Only(ctx) + query := c.Environment.Query(). + Where(environment.ID(id)) + query, err := query.CollectFields(ctx, "Environment") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case filedelete.Table: - n, err := c.FileDelete.Query(). - Where(filedelete.ID(id)). - CollectFields(ctx, "FileDelete"). - Only(ctx) + query := c.FileDelete.Query(). + Where(filedelete.ID(id)) + query, err := query.CollectFields(ctx, "FileDelete") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case filedownload.Table: - n, err := c.FileDownload.Query(). - Where(filedownload.ID(id)). - CollectFields(ctx, "FileDownload"). - Only(ctx) + query := c.FileDownload.Query(). + Where(filedownload.ID(id)) + query, err := query.CollectFields(ctx, "FileDownload") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case fileextract.Table: - n, err := c.FileExtract.Query(). - Where(fileextract.ID(id)). - CollectFields(ctx, "FileExtract"). - Only(ctx) + query := c.FileExtract.Query(). + Where(fileextract.ID(id)) + query, err := query.CollectFields(ctx, "FileExtract") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case finding.Table: - n, err := c.Finding.Query(). - Where(finding.ID(id)). - CollectFields(ctx, "Finding"). - Only(ctx) + query := c.Finding.Query(). + Where(finding.ID(id)) + query, err := query.CollectFields(ctx, "Finding") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case ginfilemiddleware.Table: - n, err := c.GinFileMiddleware.Query(). - Where(ginfilemiddleware.ID(id)). - CollectFields(ctx, "GinFileMiddleware"). - Only(ctx) + query := c.GinFileMiddleware.Query(). + Where(ginfilemiddleware.ID(id)) + query, err := query.CollectFields(ctx, "GinFileMiddleware") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case host.Table: - n, err := c.Host.Query(). - Where(host.ID(id)). - CollectFields(ctx, "Host"). - Only(ctx) + query := c.Host.Query(). + Where(host.ID(id)) + query, err := query.CollectFields(ctx, "Host") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case hostdependency.Table: - n, err := c.HostDependency.Query(). - Where(hostdependency.ID(id)). - CollectFields(ctx, "HostDependency"). - Only(ctx) + query := c.HostDependency.Query(). + Where(hostdependency.ID(id)) + query, err := query.CollectFields(ctx, "HostDependency") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case identity.Table: - n, err := c.Identity.Query(). - Where(identity.ID(id)). 
- CollectFields(ctx, "Identity"). - Only(ctx) + query := c.Identity.Query(). + Where(identity.ID(id)) + query, err := query.CollectFields(ctx, "Identity") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case includednetwork.Table: - n, err := c.IncludedNetwork.Query(). - Where(includednetwork.ID(id)). - CollectFields(ctx, "IncludedNetwork"). - Only(ctx) + query := c.IncludedNetwork.Query(). + Where(includednetwork.ID(id)) + query, err := query.CollectFields(ctx, "IncludedNetwork") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case network.Table: - n, err := c.Network.Query(). - Where(network.ID(id)). - CollectFields(ctx, "Network"). - Only(ctx) + query := c.Network.Query(). + Where(network.ID(id)) + query, err := query.CollectFields(ctx, "Network") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case plan.Table: - n, err := c.Plan.Query(). - Where(plan.ID(id)). - CollectFields(ctx, "Plan"). - Only(ctx) + query := c.Plan.Query(). + Where(plan.ID(id)) + query, err := query.CollectFields(ctx, "Plan") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case plandiff.Table: - n, err := c.PlanDiff.Query(). - Where(plandiff.ID(id)). - CollectFields(ctx, "PlanDiff"). - Only(ctx) + query := c.PlanDiff.Query(). + Where(plandiff.ID(id)) + query, err := query.CollectFields(ctx, "PlanDiff") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case provisionedhost.Table: - n, err := c.ProvisionedHost.Query(). - Where(provisionedhost.ID(id)). - CollectFields(ctx, "ProvisionedHost"). - Only(ctx) + query := c.ProvisionedHost.Query(). + Where(provisionedhost.ID(id)) + query, err := query.CollectFields(ctx, "ProvisionedHost") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case provisionednetwork.Table: - n, err := c.ProvisionedNetwork.Query(). - Where(provisionednetwork.ID(id)). - CollectFields(ctx, "ProvisionedNetwork"). - Only(ctx) + query := c.ProvisionedNetwork.Query(). + Where(provisionednetwork.ID(id)) + query, err := query.CollectFields(ctx, "ProvisionedNetwork") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case provisioningstep.Table: - n, err := c.ProvisioningStep.Query(). - Where(provisioningstep.ID(id)). - CollectFields(ctx, "ProvisioningStep"). - Only(ctx) + query := c.ProvisioningStep.Query(). + Where(provisioningstep.ID(id)) + query, err := query.CollectFields(ctx, "ProvisioningStep") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case repocommit.Table: - n, err := c.RepoCommit.Query(). - Where(repocommit.ID(id)). - CollectFields(ctx, "RepoCommit"). - Only(ctx) + query := c.RepoCommit.Query(). + Where(repocommit.ID(id)) + query, err := query.CollectFields(ctx, "RepoCommit") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case repository.Table: - n, err := c.Repository.Query(). - Where(repository.ID(id)). - CollectFields(ctx, "Repository"). - Only(ctx) + query := c.Repository.Query(). 
+ Where(repository.ID(id)) + query, err := query.CollectFields(ctx, "Repository") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case script.Table: - n, err := c.Script.Query(). - Where(script.ID(id)). - CollectFields(ctx, "Script"). - Only(ctx) + query := c.Script.Query(). + Where(script.ID(id)) + query, err := query.CollectFields(ctx, "Script") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case servertask.Table: - n, err := c.ServerTask.Query(). - Where(servertask.ID(id)). - CollectFields(ctx, "ServerTask"). - Only(ctx) + query := c.ServerTask.Query(). + Where(servertask.ID(id)) + query, err := query.CollectFields(ctx, "ServerTask") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case status.Table: - n, err := c.Status.Query(). - Where(status.ID(id)). - CollectFields(ctx, "Status"). - Only(ctx) + query := c.Status.Query(). + Where(status.ID(id)) + query, err := query.CollectFields(ctx, "Status") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case tag.Table: - n, err := c.Tag.Query(). - Where(tag.ID(id)). - CollectFields(ctx, "Tag"). - Only(ctx) + query := c.Tag.Query(). + Where(tag.ID(id)) + query, err := query.CollectFields(ctx, "Tag") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case team.Table: - n, err := c.Team.Query(). - Where(team.ID(id)). - CollectFields(ctx, "Team"). - Only(ctx) + query := c.Team.Query(). + Where(team.ID(id)) + query, err := query.CollectFields(ctx, "Team") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case token.Table: - n, err := c.Token.Query(). - Where(token.ID(id)). - CollectFields(ctx, "Token"). - Only(ctx) + query := c.Token.Query(). + Where(token.ID(id)) + query, err := query.CollectFields(ctx, "Token") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } return n, nil case user.Table: - n, err := c.User.Query(). - Where(user.ID(id)). - CollectFields(ctx, "User"). - Only(ctx) + query := c.User.Query(). + Where(user.ID(id)) + query, err := query.CollectFields(ctx, "User") + if err != nil { + return nil, err + } + n, err := query.Only(ctx) if err != nil { return nil, err } @@ -4155,10 +741,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } switch table { case adhocplan.Table: - nodes, err := c.AdhocPlan.Query(). - Where(adhocplan.IDIn(ids...)). - CollectFields(ctx, "AdhocPlan"). - All(ctx) + query := c.AdhocPlan.Query(). + Where(adhocplan.IDIn(ids...)) + query, err := query.CollectFields(ctx, "AdhocPlan") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4168,10 +757,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case agentstatus.Table: - nodes, err := c.AgentStatus.Query(). - Where(agentstatus.IDIn(ids...)). - CollectFields(ctx, "AgentStatus"). - All(ctx) + query := c.AgentStatus.Query(). 
+ Where(agentstatus.IDIn(ids...)) + query, err := query.CollectFields(ctx, "AgentStatus") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4181,10 +773,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case agenttask.Table: - nodes, err := c.AgentTask.Query(). - Where(agenttask.IDIn(ids...)). - CollectFields(ctx, "AgentTask"). - All(ctx) + query := c.AgentTask.Query(). + Where(agenttask.IDIn(ids...)) + query, err := query.CollectFields(ctx, "AgentTask") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4194,10 +789,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case ansible.Table: - nodes, err := c.Ansible.Query(). - Where(ansible.IDIn(ids...)). - CollectFields(ctx, "Ansible"). - All(ctx) + query := c.Ansible.Query(). + Where(ansible.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Ansible") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4207,10 +805,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case authuser.Table: - nodes, err := c.AuthUser.Query(). - Where(authuser.IDIn(ids...)). - CollectFields(ctx, "AuthUser"). - All(ctx) + query := c.AuthUser.Query(). + Where(authuser.IDIn(ids...)) + query, err := query.CollectFields(ctx, "AuthUser") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4220,10 +821,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case build.Table: - nodes, err := c.Build.Query(). - Where(build.IDIn(ids...)). - CollectFields(ctx, "Build"). - All(ctx) + query := c.Build.Query(). + Where(build.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Build") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4233,10 +837,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case buildcommit.Table: - nodes, err := c.BuildCommit.Query(). - Where(buildcommit.IDIn(ids...)). - CollectFields(ctx, "BuildCommit"). - All(ctx) + query := c.BuildCommit.Query(). + Where(buildcommit.IDIn(ids...)) + query, err := query.CollectFields(ctx, "BuildCommit") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4246,10 +853,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case command.Table: - nodes, err := c.Command.Query(). - Where(command.IDIn(ids...)). - CollectFields(ctx, "Command"). - All(ctx) + query := c.Command.Query(). + Where(command.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Command") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4259,10 +869,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case competition.Table: - nodes, err := c.Competition.Query(). - Where(competition.IDIn(ids...)). - CollectFields(ctx, "Competition"). - All(ctx) + query := c.Competition.Query(). 
+ Where(competition.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Competition") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4272,10 +885,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case dns.Table: - nodes, err := c.DNS.Query(). - Where(dns.IDIn(ids...)). - CollectFields(ctx, "DNS"). - All(ctx) + query := c.DNS.Query(). + Where(dns.IDIn(ids...)) + query, err := query.CollectFields(ctx, "DNS") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4285,10 +901,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case dnsrecord.Table: - nodes, err := c.DNSRecord.Query(). - Where(dnsrecord.IDIn(ids...)). - CollectFields(ctx, "DNSRecord"). - All(ctx) + query := c.DNSRecord.Query(). + Where(dnsrecord.IDIn(ids...)) + query, err := query.CollectFields(ctx, "DNSRecord") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4298,10 +917,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case disk.Table: - nodes, err := c.Disk.Query(). - Where(disk.IDIn(ids...)). - CollectFields(ctx, "Disk"). - All(ctx) + query := c.Disk.Query(). + Where(disk.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Disk") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4311,10 +933,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case environment.Table: - nodes, err := c.Environment.Query(). - Where(environment.IDIn(ids...)). - CollectFields(ctx, "Environment"). - All(ctx) + query := c.Environment.Query(). + Where(environment.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Environment") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4324,10 +949,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case filedelete.Table: - nodes, err := c.FileDelete.Query(). - Where(filedelete.IDIn(ids...)). - CollectFields(ctx, "FileDelete"). - All(ctx) + query := c.FileDelete.Query(). + Where(filedelete.IDIn(ids...)) + query, err := query.CollectFields(ctx, "FileDelete") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4337,10 +965,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case filedownload.Table: - nodes, err := c.FileDownload.Query(). - Where(filedownload.IDIn(ids...)). - CollectFields(ctx, "FileDownload"). - All(ctx) + query := c.FileDownload.Query(). + Where(filedownload.IDIn(ids...)) + query, err := query.CollectFields(ctx, "FileDownload") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4350,10 +981,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case fileextract.Table: - nodes, err := c.FileExtract.Query(). - Where(fileextract.IDIn(ids...)). - CollectFields(ctx, "FileExtract"). - All(ctx) + query := c.FileExtract.Query(). 
+ Where(fileextract.IDIn(ids...)) + query, err := query.CollectFields(ctx, "FileExtract") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4363,10 +997,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case finding.Table: - nodes, err := c.Finding.Query(). - Where(finding.IDIn(ids...)). - CollectFields(ctx, "Finding"). - All(ctx) + query := c.Finding.Query(). + Where(finding.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Finding") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4376,10 +1013,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case ginfilemiddleware.Table: - nodes, err := c.GinFileMiddleware.Query(). - Where(ginfilemiddleware.IDIn(ids...)). - CollectFields(ctx, "GinFileMiddleware"). - All(ctx) + query := c.GinFileMiddleware.Query(). + Where(ginfilemiddleware.IDIn(ids...)) + query, err := query.CollectFields(ctx, "GinFileMiddleware") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4389,10 +1029,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case host.Table: - nodes, err := c.Host.Query(). - Where(host.IDIn(ids...)). - CollectFields(ctx, "Host"). - All(ctx) + query := c.Host.Query(). + Where(host.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Host") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4402,10 +1045,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case hostdependency.Table: - nodes, err := c.HostDependency.Query(). - Where(hostdependency.IDIn(ids...)). - CollectFields(ctx, "HostDependency"). - All(ctx) + query := c.HostDependency.Query(). + Where(hostdependency.IDIn(ids...)) + query, err := query.CollectFields(ctx, "HostDependency") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4415,10 +1061,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case identity.Table: - nodes, err := c.Identity.Query(). - Where(identity.IDIn(ids...)). - CollectFields(ctx, "Identity"). - All(ctx) + query := c.Identity.Query(). + Where(identity.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Identity") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4428,10 +1077,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case includednetwork.Table: - nodes, err := c.IncludedNetwork.Query(). - Where(includednetwork.IDIn(ids...)). - CollectFields(ctx, "IncludedNetwork"). - All(ctx) + query := c.IncludedNetwork.Query(). + Where(includednetwork.IDIn(ids...)) + query, err := query.CollectFields(ctx, "IncludedNetwork") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4441,10 +1093,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case network.Table: - nodes, err := c.Network.Query(). - Where(network.IDIn(ids...)). - CollectFields(ctx, "Network"). - All(ctx) + query := c.Network.Query(). 
+ Where(network.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Network") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4454,10 +1109,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case plan.Table: - nodes, err := c.Plan.Query(). - Where(plan.IDIn(ids...)). - CollectFields(ctx, "Plan"). - All(ctx) + query := c.Plan.Query(). + Where(plan.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Plan") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4467,10 +1125,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case plandiff.Table: - nodes, err := c.PlanDiff.Query(). - Where(plandiff.IDIn(ids...)). - CollectFields(ctx, "PlanDiff"). - All(ctx) + query := c.PlanDiff.Query(). + Where(plandiff.IDIn(ids...)) + query, err := query.CollectFields(ctx, "PlanDiff") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4480,10 +1141,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case provisionedhost.Table: - nodes, err := c.ProvisionedHost.Query(). - Where(provisionedhost.IDIn(ids...)). - CollectFields(ctx, "ProvisionedHost"). - All(ctx) + query := c.ProvisionedHost.Query(). + Where(provisionedhost.IDIn(ids...)) + query, err := query.CollectFields(ctx, "ProvisionedHost") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4493,10 +1157,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case provisionednetwork.Table: - nodes, err := c.ProvisionedNetwork.Query(). - Where(provisionednetwork.IDIn(ids...)). - CollectFields(ctx, "ProvisionedNetwork"). - All(ctx) + query := c.ProvisionedNetwork.Query(). + Where(provisionednetwork.IDIn(ids...)) + query, err := query.CollectFields(ctx, "ProvisionedNetwork") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4506,10 +1173,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case provisioningstep.Table: - nodes, err := c.ProvisioningStep.Query(). - Where(provisioningstep.IDIn(ids...)). - CollectFields(ctx, "ProvisioningStep"). - All(ctx) + query := c.ProvisioningStep.Query(). + Where(provisioningstep.IDIn(ids...)) + query, err := query.CollectFields(ctx, "ProvisioningStep") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4519,10 +1189,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case repocommit.Table: - nodes, err := c.RepoCommit.Query(). - Where(repocommit.IDIn(ids...)). - CollectFields(ctx, "RepoCommit"). - All(ctx) + query := c.RepoCommit.Query(). + Where(repocommit.IDIn(ids...)) + query, err := query.CollectFields(ctx, "RepoCommit") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4532,10 +1205,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case repository.Table: - nodes, err := c.Repository.Query(). - Where(repository.IDIn(ids...)). - CollectFields(ctx, "Repository"). - All(ctx) + query := c.Repository.Query(). 
+ Where(repository.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Repository") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4545,10 +1221,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case script.Table: - nodes, err := c.Script.Query(). - Where(script.IDIn(ids...)). - CollectFields(ctx, "Script"). - All(ctx) + query := c.Script.Query(). + Where(script.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Script") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4558,10 +1237,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case servertask.Table: - nodes, err := c.ServerTask.Query(). - Where(servertask.IDIn(ids...)). - CollectFields(ctx, "ServerTask"). - All(ctx) + query := c.ServerTask.Query(). + Where(servertask.IDIn(ids...)) + query, err := query.CollectFields(ctx, "ServerTask") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4571,10 +1253,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case status.Table: - nodes, err := c.Status.Query(). - Where(status.IDIn(ids...)). - CollectFields(ctx, "Status"). - All(ctx) + query := c.Status.Query(). + Where(status.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Status") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4584,10 +1269,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case tag.Table: - nodes, err := c.Tag.Query(). - Where(tag.IDIn(ids...)). - CollectFields(ctx, "Tag"). - All(ctx) + query := c.Tag.Query(). + Where(tag.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Tag") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4597,10 +1285,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case team.Table: - nodes, err := c.Team.Query(). - Where(team.IDIn(ids...)). - CollectFields(ctx, "Team"). - All(ctx) + query := c.Team.Query(). + Where(team.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Team") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4610,10 +1301,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case token.Table: - nodes, err := c.Token.Query(). - Where(token.IDIn(ids...)). - CollectFields(ctx, "Token"). - All(ctx) + query := c.Token.Query(). + Where(token.IDIn(ids...)) + query, err := query.CollectFields(ctx, "Token") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } @@ -4623,10 +1317,13 @@ func (c *Client) noders(ctx context.Context, table string, ids []uuid.UUID) ([]N } } case user.Table: - nodes, err := c.User.Query(). - Where(user.IDIn(ids...)). - CollectFields(ctx, "User"). - All(ctx) + query := c.User.Query(). + Where(user.IDIn(ids...)) + query, err := query.CollectFields(ctx, "User") + if err != nil { + return nil, err + } + nodes, err := query.All(ctx) if err != nil { return nil, err } diff --git a/ent/gql_pagination.go b/ent/gql_pagination.go index ce36970d..ef57e7d9 100644 --- a/ent/gql_pagination.go +++ b/ent/gql_pagination.go @@ -1,16 +1,13 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
+// Code generated by ent, DO NOT EDIT.
package ent import ( "context" - "encoding/base64" "errors" - "fmt" - "io" - "strconv" - "strings" + "entgo.io/contrib/entgql" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/99designs/gqlgen/graphql" "github.com/99designs/gqlgen/graphql/errcode" @@ -53,165 +50,22 @@ import ( "github.com/gen0cide/laforge/ent/user" "github.com/google/uuid" "github.com/vektah/gqlparser/v2/gqlerror" - "github.com/vmihailenco/msgpack/v5" ) -// OrderDirection defines the directions in which to order a list of items. -type OrderDirection string - -const ( - // OrderDirectionAsc specifies an ascending order. - OrderDirectionAsc OrderDirection = "ASC" - // OrderDirectionDesc specifies a descending order. - OrderDirectionDesc OrderDirection = "DESC" +// Common entgql types. +type ( + Cursor = entgql.Cursor[uuid.UUID] + PageInfo = entgql.PageInfo[uuid.UUID] + OrderDirection = entgql.OrderDirection ) -// Validate the order direction value. -func (o OrderDirection) Validate() error { - if o != OrderDirectionAsc && o != OrderDirectionDesc { - return fmt.Errorf("%s is not a valid OrderDirection", o) - } - return nil -} - -// String implements fmt.Stringer interface. -func (o OrderDirection) String() string { - return string(o) -} - -// MarshalGQL implements graphql.Marshaler interface. -func (o OrderDirection) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(o.String())) -} - -// UnmarshalGQL implements graphql.Unmarshaler interface. -func (o *OrderDirection) UnmarshalGQL(val interface{}) error { - str, ok := val.(string) - if !ok { - return fmt.Errorf("order direction %T must be a string", val) - } - *o = OrderDirection(str) - return o.Validate() -} - -func (o OrderDirection) reverse() OrderDirection { - if o == OrderDirectionDesc { - return OrderDirectionAsc - } - return OrderDirectionDesc -} - -func (o OrderDirection) orderFunc(field string) OrderFunc { - if o == OrderDirectionDesc { +func orderFunc(o OrderDirection, field string) func(*sql.Selector) { + if o == entgql.OrderDirectionDesc { return Desc(field) } return Asc(field) } -func cursorsToPredicates(direction OrderDirection, after, before *Cursor, field, idField string) []func(s *sql.Selector) { - var predicates []func(s *sql.Selector) - if after != nil { - if after.Value != nil { - var predicate func([]string, ...interface{}) *sql.Predicate - if direction == OrderDirectionAsc { - predicate = sql.CompositeGT - } else { - predicate = sql.CompositeLT - } - predicates = append(predicates, func(s *sql.Selector) { - s.Where(predicate( - s.Columns(field, idField), - after.Value, after.ID, - )) - }) - } else { - var predicate func(string, interface{}) *sql.Predicate - if direction == OrderDirectionAsc { - predicate = sql.GT - } else { - predicate = sql.LT - } - predicates = append(predicates, func(s *sql.Selector) { - s.Where(predicate( - s.C(idField), - after.ID, - )) - }) - } - } - if before != nil { - if before.Value != nil { - var predicate func([]string, ...interface{}) *sql.Predicate - if direction == OrderDirectionAsc { - predicate = sql.CompositeLT - } else { - predicate = sql.CompositeGT - } - predicates = append(predicates, func(s *sql.Selector) { - s.Where(predicate( - s.Columns(field, idField), - before.Value, before.ID, - )) - }) - } else { - var predicate func(string, interface{}) *sql.Predicate - if direction == OrderDirectionAsc { - predicate = sql.LT - } else { - predicate = sql.GT - } - predicates = append(predicates, func(s *sql.Selector) { - s.Where(predicate( - s.C(idField), - before.ID, - )) - }) - } - } - return predicates -} 
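The hunk above replaces the hand-rolled OrderDirection, Cursor, and PageInfo plumbing with the generic helpers from entgo.io/contrib/entgql. A minimal standalone sketch of how the new aliases behave (illustrative only; it assumes the entgql and google/uuid modules and is not part of the generated file):

package main

import (
	"fmt"

	"entgo.io/contrib/entgql"
	"github.com/google/uuid"
)

// Cursor becomes a plain alias for the generic entgql cursor keyed by uuid.UUID,
// so cursor encoding and direction handling now live upstream in entgql.
type Cursor = entgql.Cursor[uuid.UUID]

func main() {
	dir := entgql.OrderDirectionAsc
	fmt.Println(dir, dir.Reverse()) // ASC DESC
	fmt.Println(Cursor{ID: uuid.New()}.ID)
}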
- -// PageInfo of a connection type. -type PageInfo struct { - HasNextPage bool `json:"hasNextPage"` - HasPreviousPage bool `json:"hasPreviousPage"` - StartCursor *Cursor `json:"startCursor"` - EndCursor *Cursor `json:"endCursor"` -} - -// Cursor of an edge type. -type Cursor struct { - ID uuid.UUID `msgpack:"i"` - Value Value `msgpack:"v,omitempty"` -} - -// MarshalGQL implements graphql.Marshaler interface. -func (c Cursor) MarshalGQL(w io.Writer) { - quote := []byte{'"'} - w.Write(quote) - defer w.Write(quote) - wc := base64.NewEncoder(base64.RawStdEncoding, w) - defer wc.Close() - _ = msgpack.NewEncoder(wc).Encode(c) -} - -// UnmarshalGQL implements graphql.Unmarshaler interface. -func (c *Cursor) UnmarshalGQL(v interface{}) error { - s, ok := v.(string) - if !ok { - return fmt.Errorf("%T is not a string", v) - } - if err := msgpack.NewDecoder( - base64.NewDecoder( - base64.RawStdEncoding, - strings.NewReader(s), - ), - ).Decode(c); err != nil { - return fmt.Errorf("cannot decode cursor: %w", err) - } - return nil -} - const errInvalidPagination = "INVALID_PAGINATION" func validateFirstLast(first, last *int) (err *gqlerror.Error) { @@ -234,18 +88,17 @@ func validateFirstLast(first, last *int) (err *gqlerror.Error) { return err } -func getCollectedField(ctx context.Context, path ...string) *graphql.CollectedField { +func collectedField(ctx context.Context, path ...string) *graphql.CollectedField { fc := graphql.GetFieldContext(ctx) if fc == nil { return nil } - oc := graphql.GetOperationContext(ctx) field := fc.Field - + oc := graphql.GetOperationContext(ctx) walk: for _, name := range path { for _, f := range graphql.CollectFields(oc, field.Selections, nil) { - if f.Name == name { + if f.Alias == name { field = f continue walk } @@ -259,7 +112,7 @@ func hasCollectedField(ctx context.Context, path ...string) bool { if graphql.GetFieldContext(ctx) == nil { return true } - return getCollectedField(ctx, path...) != nil + return collectedField(ctx, path...) != nil } const ( @@ -269,6 +122,16 @@ const ( totalCountField = "totalCount" ) +func paginateLimit(first, last *int) int { + var limit int + if first != nil { + limit = *first + 1 + } else if last != nil { + limit = *last + 1 + } + return limit +} + // AdhocPlanEdge is the edge representation of AdhocPlan. type AdhocPlanEdge struct { Node *AdhocPlan `json:"node"` @@ -282,8 +145,46 @@ type AdhocPlanConnection struct { TotalCount int `json:"totalCount"` } +func (c *AdhocPlanConnection) build(nodes []*AdhocPlan, pager *adhocplanPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *AdhocPlan + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *AdhocPlan { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *AdhocPlan { + return nodes[i] + } + } + c.Edges = make([]*AdhocPlanEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &AdhocPlanEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // AdhocPlanPaginateOption enables pagination customization. 
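The paginateLimit helper and the per-connection build methods added above implement the usual Relay trick: the query asks the database for one row more than the client requested, the extra row is used only to flip hasNextPage or hasPreviousPage, and it is trimmed before edges are built; with last, rows arrive in reverse order and are read back to front. A self-contained sketch of that logic in plain Go (not generated code; a slice of ints stands in for the fetched nodes):

package main

import "fmt"

// page mimics the trimming and reversing done by the generated build methods.
func page(rows []int, first, last *int) (nodes []int, hasNext, hasPrev bool) {
	if first != nil && *first+1 == len(rows) {
		hasNext = true
		rows = rows[:len(rows)-1]
	} else if last != nil && *last+1 == len(rows) {
		hasPrev = true
		rows = rows[:len(rows)-1]
	}
	if last != nil { // the query ran in reverse order, so restore the requested order
		for i, j := 0, len(rows)-1; i < j; i, j = i+1, j-1 {
			rows[i], rows[j] = rows[j], rows[i]
		}
	}
	return rows, hasNext, hasPrev
}

func main() {
	first := 3
	// paginateLimit(&first, nil) == 4, and the query returned all 4 rows,
	// so a next page exists and the sentinel row is dropped.
	fmt.Println(page([]int{1, 2, 3, 4}, &first, nil)) // [1 2 3] true false
}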
-type AdhocPlanPaginateOption func(*adhocPlanPager) error +type AdhocPlanPaginateOption func(*adhocplanPager) error // WithAdhocPlanOrder configures pagination ordering. func WithAdhocPlanOrder(order *AdhocPlanOrder) AdhocPlanPaginateOption { @@ -291,7 +192,7 @@ func WithAdhocPlanOrder(order *AdhocPlanOrder) AdhocPlanPaginateOption { order = DefaultAdhocPlanOrder } o := *order - return func(pager *adhocPlanPager) error { + return func(pager *adhocplanPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -305,7 +206,7 @@ func WithAdhocPlanOrder(order *AdhocPlanOrder) AdhocPlanPaginateOption { // WithAdhocPlanFilter configures pagination filter. func WithAdhocPlanFilter(filter func(*AdhocPlanQuery) (*AdhocPlanQuery, error)) AdhocPlanPaginateOption { - return func(pager *adhocPlanPager) error { + return func(pager *adhocplanPager) error { if filter == nil { return errors.New("AdhocPlanQuery filter cannot be nil") } @@ -314,13 +215,14 @@ func WithAdhocPlanFilter(filter func(*AdhocPlanQuery) (*AdhocPlanQuery, error)) } } -type adhocPlanPager struct { - order *AdhocPlanOrder - filter func(*AdhocPlanQuery) (*AdhocPlanQuery, error) +type adhocplanPager struct { + reverse bool + order *AdhocPlanOrder + filter func(*AdhocPlanQuery) (*AdhocPlanQuery, error) } -func newAdhocPlanPager(opts []AdhocPlanPaginateOption) (*adhocPlanPager, error) { - pager := &adhocPlanPager{} +func newAdhocPlanPager(opts []AdhocPlanPaginateOption, reverse bool) (*adhocplanPager, error) { + pager := &adhocplanPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -332,39 +234,59 @@ func newAdhocPlanPager(opts []AdhocPlanPaginateOption) (*adhocPlanPager, error) return pager, nil } -func (p *adhocPlanPager) applyFilter(query *AdhocPlanQuery) (*AdhocPlanQuery, error) { +func (p *adhocplanPager) applyFilter(query *AdhocPlanQuery) (*AdhocPlanQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *adhocPlanPager) toCursor(ap *AdhocPlan) Cursor { +func (p *adhocplanPager) toCursor(ap *AdhocPlan) Cursor { return p.order.Field.toCursor(ap) } -func (p *adhocPlanPager) applyCursors(query *AdhocPlanQuery, after, before *Cursor) *AdhocPlanQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultAdhocPlanOrder.Field.field, - ) { +func (p *adhocplanPager) applyCursors(query *AdhocPlanQuery, after, before *Cursor) (*AdhocPlanQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultAdhocPlanOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *adhocPlanPager) applyOrder(query *AdhocPlanQuery, reverse bool) *AdhocPlanQuery { +func (p *adhocplanPager) applyOrder(query *AdhocPlanQuery) *AdhocPlanQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultAdhocPlanOrder.Field { - query = query.Order(direction.orderFunc(DefaultAdhocPlanOrder.Field.field)) + query = query.Order(DefaultAdhocPlanOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return 
query } +func (p *adhocplanPager) orderExpr(query *AdhocPlanQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultAdhocPlanOrder.Field { + b.Comma().Ident(DefaultAdhocPlanOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to AdhocPlan. func (ap *AdhocPlanQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -373,98 +295,54 @@ func (ap *AdhocPlanQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newAdhocPlanPager(opts) + pager, err := newAdhocPlanPager(opts, last != nil) if err != nil { return nil, err } - if ap, err = pager.applyFilter(ap); err != nil { return nil, err } - conn := &AdhocPlanConnection{Edges: []*AdhocPlanEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := ap.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = ap.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := ap.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - ap = pager.applyCursors(ap, after, before) - ap = pager.applyOrder(ap, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - ap = ap.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - ap = ap.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := ap.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if ap, err = pager.applyCursors(ap, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *AdhocPlan - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *AdhocPlan { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *AdhocPlan { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + ap.Limit(limit) } - - conn.Edges = make([]*AdhocPlanEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &AdhocPlanEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := 
ap.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + ap = pager.applyOrder(ap) + nodes, err := ap.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // AdhocPlanOrderField defines the ordering field of AdhocPlan. type AdhocPlanOrderField struct { - field string + // Value extracts the ordering value from the given AdhocPlan. + Value func(*AdhocPlan) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) adhocplan.OrderOption toCursor func(*AdhocPlan) Cursor } @@ -476,9 +354,13 @@ type AdhocPlanOrder struct { // DefaultAdhocPlanOrder is the default ordering of AdhocPlan. var DefaultAdhocPlanOrder = &AdhocPlanOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &AdhocPlanOrderField{ - field: adhocplan.FieldID, + Value: func(ap *AdhocPlan) (ent.Value, error) { + return ap.ID, nil + }, + column: adhocplan.FieldID, + toTerm: adhocplan.ByID, toCursor: func(ap *AdhocPlan) Cursor { return Cursor{ID: ap.ID} }, @@ -509,8 +391,46 @@ type AgentStatusConnection struct { TotalCount int `json:"totalCount"` } +func (c *AgentStatusConnection) build(nodes []*AgentStatus, pager *agentstatusPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *AgentStatus + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *AgentStatus { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *AgentStatus { + return nodes[i] + } + } + c.Edges = make([]*AgentStatusEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &AgentStatusEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // AgentStatusPaginateOption enables pagination customization. -type AgentStatusPaginateOption func(*agentStatusPager) error +type AgentStatusPaginateOption func(*agentstatusPager) error // WithAgentStatusOrder configures pagination ordering. func WithAgentStatusOrder(order *AgentStatusOrder) AgentStatusPaginateOption { @@ -518,7 +438,7 @@ func WithAgentStatusOrder(order *AgentStatusOrder) AgentStatusPaginateOption { order = DefaultAgentStatusOrder } o := *order - return func(pager *agentStatusPager) error { + return func(pager *agentstatusPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -532,7 +452,7 @@ func WithAgentStatusOrder(order *AgentStatusOrder) AgentStatusPaginateOption { // WithAgentStatusFilter configures pagination filter. 
func WithAgentStatusFilter(filter func(*AgentStatusQuery) (*AgentStatusQuery, error)) AgentStatusPaginateOption { - return func(pager *agentStatusPager) error { + return func(pager *agentstatusPager) error { if filter == nil { return errors.New("AgentStatusQuery filter cannot be nil") } @@ -541,13 +461,14 @@ func WithAgentStatusFilter(filter func(*AgentStatusQuery) (*AgentStatusQuery, er } } -type agentStatusPager struct { - order *AgentStatusOrder - filter func(*AgentStatusQuery) (*AgentStatusQuery, error) +type agentstatusPager struct { + reverse bool + order *AgentStatusOrder + filter func(*AgentStatusQuery) (*AgentStatusQuery, error) } -func newAgentStatusPager(opts []AgentStatusPaginateOption) (*agentStatusPager, error) { - pager := &agentStatusPager{} +func newAgentStatusPager(opts []AgentStatusPaginateOption, reverse bool) (*agentstatusPager, error) { + pager := &agentstatusPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -559,39 +480,59 @@ func newAgentStatusPager(opts []AgentStatusPaginateOption) (*agentStatusPager, e return pager, nil } -func (p *agentStatusPager) applyFilter(query *AgentStatusQuery) (*AgentStatusQuery, error) { +func (p *agentstatusPager) applyFilter(query *AgentStatusQuery) (*AgentStatusQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *agentStatusPager) toCursor(as *AgentStatus) Cursor { +func (p *agentstatusPager) toCursor(as *AgentStatus) Cursor { return p.order.Field.toCursor(as) } -func (p *agentStatusPager) applyCursors(query *AgentStatusQuery, after, before *Cursor) *AgentStatusQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultAgentStatusOrder.Field.field, - ) { +func (p *agentstatusPager) applyCursors(query *AgentStatusQuery, after, before *Cursor) (*AgentStatusQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultAgentStatusOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *agentStatusPager) applyOrder(query *AgentStatusQuery, reverse bool) *AgentStatusQuery { +func (p *agentstatusPager) applyOrder(query *AgentStatusQuery) *AgentStatusQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultAgentStatusOrder.Field { - query = query.Order(direction.orderFunc(DefaultAgentStatusOrder.Field.field)) + query = query.Order(DefaultAgentStatusOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *agentstatusPager) orderExpr(query *AgentStatusQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultAgentStatusOrder.Field { + b.Comma().Ident(DefaultAgentStatusOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and 
returns a relay based cursor connection to AgentStatus. func (as *AgentStatusQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -600,98 +541,54 @@ func (as *AgentStatusQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newAgentStatusPager(opts) + pager, err := newAgentStatusPager(opts, last != nil) if err != nil { return nil, err } - if as, err = pager.applyFilter(as); err != nil { return nil, err } - conn := &AgentStatusConnection{Edges: []*AgentStatusEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := as.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = as.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := as.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - as = pager.applyCursors(as, after, before) - as = pager.applyOrder(as, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - as = as.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - as = as.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := as.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if as, err = pager.applyCursors(as, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *AgentStatus - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *AgentStatus { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *AgentStatus { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + as.Limit(limit) } - - conn.Edges = make([]*AgentStatusEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &AgentStatusEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := as.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + as = pager.applyOrder(as) + nodes, err := as.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // AgentStatusOrderField defines the ordering field of 
AgentStatus. type AgentStatusOrderField struct { - field string + // Value extracts the ordering value from the given AgentStatus. + Value func(*AgentStatus) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) agentstatus.OrderOption toCursor func(*AgentStatus) Cursor } @@ -703,9 +600,13 @@ type AgentStatusOrder struct { // DefaultAgentStatusOrder is the default ordering of AgentStatus. var DefaultAgentStatusOrder = &AgentStatusOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &AgentStatusOrderField{ - field: agentstatus.FieldID, + Value: func(as *AgentStatus) (ent.Value, error) { + return as.ID, nil + }, + column: agentstatus.FieldID, + toTerm: agentstatus.ByID, toCursor: func(as *AgentStatus) Cursor { return Cursor{ID: as.ID} }, @@ -736,8 +637,46 @@ type AgentTaskConnection struct { TotalCount int `json:"totalCount"` } +func (c *AgentTaskConnection) build(nodes []*AgentTask, pager *agenttaskPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *AgentTask + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *AgentTask { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *AgentTask { + return nodes[i] + } + } + c.Edges = make([]*AgentTaskEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &AgentTaskEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // AgentTaskPaginateOption enables pagination customization. -type AgentTaskPaginateOption func(*agentTaskPager) error +type AgentTaskPaginateOption func(*agenttaskPager) error // WithAgentTaskOrder configures pagination ordering. func WithAgentTaskOrder(order *AgentTaskOrder) AgentTaskPaginateOption { @@ -745,7 +684,7 @@ func WithAgentTaskOrder(order *AgentTaskOrder) AgentTaskPaginateOption { order = DefaultAgentTaskOrder } o := *order - return func(pager *agentTaskPager) error { + return func(pager *agenttaskPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -759,7 +698,7 @@ func WithAgentTaskOrder(order *AgentTaskOrder) AgentTaskPaginateOption { // WithAgentTaskFilter configures pagination filter. 
func WithAgentTaskFilter(filter func(*AgentTaskQuery) (*AgentTaskQuery, error)) AgentTaskPaginateOption { - return func(pager *agentTaskPager) error { + return func(pager *agenttaskPager) error { if filter == nil { return errors.New("AgentTaskQuery filter cannot be nil") } @@ -768,13 +707,14 @@ func WithAgentTaskFilter(filter func(*AgentTaskQuery) (*AgentTaskQuery, error)) } } -type agentTaskPager struct { - order *AgentTaskOrder - filter func(*AgentTaskQuery) (*AgentTaskQuery, error) +type agenttaskPager struct { + reverse bool + order *AgentTaskOrder + filter func(*AgentTaskQuery) (*AgentTaskQuery, error) } -func newAgentTaskPager(opts []AgentTaskPaginateOption) (*agentTaskPager, error) { - pager := &agentTaskPager{} +func newAgentTaskPager(opts []AgentTaskPaginateOption, reverse bool) (*agenttaskPager, error) { + pager := &agenttaskPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -786,39 +726,59 @@ func newAgentTaskPager(opts []AgentTaskPaginateOption) (*agentTaskPager, error) return pager, nil } -func (p *agentTaskPager) applyFilter(query *AgentTaskQuery) (*AgentTaskQuery, error) { +func (p *agenttaskPager) applyFilter(query *AgentTaskQuery) (*AgentTaskQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *agentTaskPager) toCursor(at *AgentTask) Cursor { +func (p *agenttaskPager) toCursor(at *AgentTask) Cursor { return p.order.Field.toCursor(at) } -func (p *agentTaskPager) applyCursors(query *AgentTaskQuery, after, before *Cursor) *AgentTaskQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultAgentTaskOrder.Field.field, - ) { +func (p *agenttaskPager) applyCursors(query *AgentTaskQuery, after, before *Cursor) (*AgentTaskQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultAgentTaskOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *agentTaskPager) applyOrder(query *AgentTaskQuery, reverse bool) *AgentTaskQuery { +func (p *agenttaskPager) applyOrder(query *AgentTaskQuery) *AgentTaskQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultAgentTaskOrder.Field { - query = query.Order(direction.orderFunc(DefaultAgentTaskOrder.Field.field)) + query = query.Order(DefaultAgentTaskOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *agenttaskPager) orderExpr(query *AgentTaskQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultAgentTaskOrder.Field { + b.Comma().Ident(DefaultAgentTaskOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to AgentTask. 
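The rewritten Paginate bodies in this file keep the caller-facing signature (ctx, after, first, before, last, options) unchanged, so resolver code written against the old generated file keeps compiling. As a reference point, a resolver-side call might look like the sketch below; the client handle, the page size, and the descending direction are assumptions for illustration, not code introduced by this change:

// Illustrative only; assumes a *ent.Client is available to the resolver layer.
package resolvers

import (
	"context"

	"entgo.io/contrib/entgql"
	"github.com/gen0cide/laforge/ent"
)

func firstTenAgentTasks(ctx context.Context, client *ent.Client) (*ent.AgentTaskConnection, error) {
	first := 10
	return client.AgentTask.Query().
		Paginate(ctx, nil, &first, nil, nil,
			ent.WithAgentTaskOrder(&ent.AgentTaskOrder{
				Direction: entgql.OrderDirectionDesc,
				Field:     ent.DefaultAgentTaskOrder.Field,
			}),
		)
}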
func (at *AgentTaskQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -827,98 +787,54 @@ func (at *AgentTaskQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newAgentTaskPager(opts) + pager, err := newAgentTaskPager(opts, last != nil) if err != nil { return nil, err } - if at, err = pager.applyFilter(at); err != nil { return nil, err } - conn := &AgentTaskConnection{Edges: []*AgentTaskEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := at.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = at.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } + } + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := at.Clone().Count(ctx) - if err != nil { + if at, err = pager.applyCursors(at, after, before); err != nil { + return nil, err + } + if limit := paginateLimit(first, last); limit != 0 { + at.Limit(limit) + } + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := at.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { return nil, err } - conn.TotalCount = count } - - at = pager.applyCursors(at, after, before) - at = pager.applyOrder(at, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - at = at.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - at = at.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := at.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err - } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *AgentTask - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *AgentTask { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *AgentTask { - return nodes[i] - } - } - - conn.Edges = make([]*AgentTaskEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &AgentTaskEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} + at = pager.applyOrder(at) + nodes, err := at.All(ctx) + if err != nil { + return nil, err + } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} // AgentTaskOrderField defines the ordering field of AgentTask. 
type AgentTaskOrderField struct { - field string + // Value extracts the ordering value from the given AgentTask. + Value func(*AgentTask) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) agenttask.OrderOption toCursor func(*AgentTask) Cursor } @@ -930,9 +846,13 @@ type AgentTaskOrder struct { // DefaultAgentTaskOrder is the default ordering of AgentTask. var DefaultAgentTaskOrder = &AgentTaskOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &AgentTaskOrderField{ - field: agenttask.FieldID, + Value: func(at *AgentTask) (ent.Value, error) { + return at.ID, nil + }, + column: agenttask.FieldID, + toTerm: agenttask.ByID, toCursor: func(at *AgentTask) Cursor { return Cursor{ID: at.ID} }, @@ -963,6 +883,44 @@ type AnsibleConnection struct { TotalCount int `json:"totalCount"` } +func (c *AnsibleConnection) build(nodes []*Ansible, pager *ansiblePager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Ansible + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Ansible { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Ansible { + return nodes[i] + } + } + c.Edges = make([]*AnsibleEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &AnsibleEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // AnsiblePaginateOption enables pagination customization. 
type AnsiblePaginateOption func(*ansiblePager) error @@ -996,12 +954,13 @@ func WithAnsibleFilter(filter func(*AnsibleQuery) (*AnsibleQuery, error)) Ansibl } type ansiblePager struct { - order *AnsibleOrder - filter func(*AnsibleQuery) (*AnsibleQuery, error) + reverse bool + order *AnsibleOrder + filter func(*AnsibleQuery) (*AnsibleQuery, error) } -func newAnsiblePager(opts []AnsiblePaginateOption) (*ansiblePager, error) { - pager := &ansiblePager{} +func newAnsiblePager(opts []AnsiblePaginateOption, reverse bool) (*ansiblePager, error) { + pager := &ansiblePager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -1024,28 +983,48 @@ func (p *ansiblePager) toCursor(a *Ansible) Cursor { return p.order.Field.toCursor(a) } -func (p *ansiblePager) applyCursors(query *AnsibleQuery, after, before *Cursor) *AnsibleQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultAnsibleOrder.Field.field, - ) { +func (p *ansiblePager) applyCursors(query *AnsibleQuery, after, before *Cursor) (*AnsibleQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultAnsibleOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *ansiblePager) applyOrder(query *AnsibleQuery, reverse bool) *AnsibleQuery { +func (p *ansiblePager) applyOrder(query *AnsibleQuery) *AnsibleQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultAnsibleOrder.Field { - query = query.Order(direction.orderFunc(DefaultAnsibleOrder.Field.field)) + query = query.Order(DefaultAnsibleOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *ansiblePager) orderExpr(query *AnsibleQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultAnsibleOrder.Field { + b.Comma().Ident(DefaultAnsibleOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Ansible. 
func (a *AnsibleQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -1054,98 +1033,54 @@ func (a *AnsibleQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newAnsiblePager(opts) + pager, err := newAnsiblePager(opts, last != nil) if err != nil { return nil, err } - if a, err = pager.applyFilter(a); err != nil { return nil, err } - conn := &AnsibleConnection{Edges: []*AnsibleEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := a.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = a.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := a.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - a = pager.applyCursors(a, after, before) - a = pager.applyOrder(a, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 } - if limit > 0 { - a = a.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - a = a.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := a.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if a, err = pager.applyCursors(a, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Ansible - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Ansible { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Ansible { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + a.Limit(limit) } - - conn.Edges = make([]*AnsibleEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &AnsibleEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := a.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + a = pager.applyOrder(a) + nodes, err := a.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // AnsibleOrderField defines the ordering field of Ansible. type AnsibleOrderField struct { - field string + // Value extracts the ordering value from the given Ansible. 
+ Value func(*Ansible) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) ansible.OrderOption toCursor func(*Ansible) Cursor } @@ -1157,9 +1092,13 @@ type AnsibleOrder struct { // DefaultAnsibleOrder is the default ordering of Ansible. var DefaultAnsibleOrder = &AnsibleOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &AnsibleOrderField{ - field: ansible.FieldID, + Value: func(a *Ansible) (ent.Value, error) { + return a.ID, nil + }, + column: ansible.FieldID, + toTerm: ansible.ByID, toCursor: func(a *Ansible) Cursor { return Cursor{ID: a.ID} }, @@ -1190,8 +1129,46 @@ type AuthUserConnection struct { TotalCount int `json:"totalCount"` } +func (c *AuthUserConnection) build(nodes []*AuthUser, pager *authuserPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *AuthUser + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *AuthUser { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *AuthUser { + return nodes[i] + } + } + c.Edges = make([]*AuthUserEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &AuthUserEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // AuthUserPaginateOption enables pagination customization. -type AuthUserPaginateOption func(*authUserPager) error +type AuthUserPaginateOption func(*authuserPager) error // WithAuthUserOrder configures pagination ordering. func WithAuthUserOrder(order *AuthUserOrder) AuthUserPaginateOption { @@ -1199,7 +1176,7 @@ func WithAuthUserOrder(order *AuthUserOrder) AuthUserPaginateOption { order = DefaultAuthUserOrder } o := *order - return func(pager *authUserPager) error { + return func(pager *authuserPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -1213,7 +1190,7 @@ func WithAuthUserOrder(order *AuthUserOrder) AuthUserPaginateOption { // WithAuthUserFilter configures pagination filter. 
func WithAuthUserFilter(filter func(*AuthUserQuery) (*AuthUserQuery, error)) AuthUserPaginateOption { - return func(pager *authUserPager) error { + return func(pager *authuserPager) error { if filter == nil { return errors.New("AuthUserQuery filter cannot be nil") } @@ -1222,13 +1199,14 @@ func WithAuthUserFilter(filter func(*AuthUserQuery) (*AuthUserQuery, error)) Aut } } -type authUserPager struct { - order *AuthUserOrder - filter func(*AuthUserQuery) (*AuthUserQuery, error) +type authuserPager struct { + reverse bool + order *AuthUserOrder + filter func(*AuthUserQuery) (*AuthUserQuery, error) } -func newAuthUserPager(opts []AuthUserPaginateOption) (*authUserPager, error) { - pager := &authUserPager{} +func newAuthUserPager(opts []AuthUserPaginateOption, reverse bool) (*authuserPager, error) { + pager := &authuserPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -1240,39 +1218,59 @@ func newAuthUserPager(opts []AuthUserPaginateOption) (*authUserPager, error) { return pager, nil } -func (p *authUserPager) applyFilter(query *AuthUserQuery) (*AuthUserQuery, error) { +func (p *authuserPager) applyFilter(query *AuthUserQuery) (*AuthUserQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *authUserPager) toCursor(au *AuthUser) Cursor { +func (p *authuserPager) toCursor(au *AuthUser) Cursor { return p.order.Field.toCursor(au) } -func (p *authUserPager) applyCursors(query *AuthUserQuery, after, before *Cursor) *AuthUserQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultAuthUserOrder.Field.field, - ) { +func (p *authuserPager) applyCursors(query *AuthUserQuery, after, before *Cursor) (*AuthUserQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultAuthUserOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *authUserPager) applyOrder(query *AuthUserQuery, reverse bool) *AuthUserQuery { +func (p *authuserPager) applyOrder(query *AuthUserQuery) *AuthUserQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultAuthUserOrder.Field { - query = query.Order(direction.orderFunc(DefaultAuthUserOrder.Field.field)) + query = query.Order(DefaultAuthUserOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *authuserPager) orderExpr(query *AuthUserQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultAuthUserOrder.Field { + b.Comma().Ident(DefaultAuthUserOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to AuthUser. 
func (au *AuthUserQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -1281,98 +1279,54 @@ func (au *AuthUserQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newAuthUserPager(opts) + pager, err := newAuthUserPager(opts, last != nil) if err != nil { return nil, err } - if au, err = pager.applyFilter(au); err != nil { return nil, err } - conn := &AuthUserConnection{Edges: []*AuthUserEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := au.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = au.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := au.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - au = pager.applyCursors(au, after, before) - au = pager.applyOrder(au, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - au = au.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - au = au.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := au.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if au, err = pager.applyCursors(au, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *AuthUser - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *AuthUser { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *AuthUser { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + au.Limit(limit) } - - conn.Edges = make([]*AuthUserEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &AuthUserEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := au.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + au = pager.applyOrder(au) + nodes, err := au.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // AuthUserOrderField defines the ordering field of AuthUser. 
type AuthUserOrderField struct { - field string + // Value extracts the ordering value from the given AuthUser. + Value func(*AuthUser) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) authuser.OrderOption toCursor func(*AuthUser) Cursor } @@ -1384,9 +1338,13 @@ type AuthUserOrder struct { // DefaultAuthUserOrder is the default ordering of AuthUser. var DefaultAuthUserOrder = &AuthUserOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &AuthUserOrderField{ - field: authuser.FieldID, + Value: func(au *AuthUser) (ent.Value, error) { + return au.ID, nil + }, + column: authuser.FieldID, + toTerm: authuser.ByID, toCursor: func(au *AuthUser) Cursor { return Cursor{ID: au.ID} }, @@ -1417,6 +1375,44 @@ type BuildConnection struct { TotalCount int `json:"totalCount"` } +func (c *BuildConnection) build(nodes []*Build, pager *buildPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Build + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Build { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Build { + return nodes[i] + } + } + c.Edges = make([]*BuildEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &BuildEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // BuildPaginateOption enables pagination customization. 
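Each generated <Type>Connection.build helper, like the BuildConnection.build added above, performs the same over-fetch bookkeeping: the SQL limit was first+1 (or last+1), the extra row only proves another page exists, and rows are re-reversed for backward pagination (the cursor-derived HasNextPage/HasPreviousPage defaults are omitted here). A self-contained stand-in over a plain int slice, purely for illustration:

package main

import "fmt"

// page mirrors the trimming and page-info logic of the generated build helpers.
func page(rows []int, first, last *int) (nodes []int, hasNext, hasPrev bool) {
	if first != nil && len(rows) == *first+1 {
		hasNext = true
		rows = rows[:len(rows)-1]
	} else if last != nil && len(rows) == *last+1 {
		hasPrev = true
		rows = rows[:len(rows)-1]
	}
	if last != nil { // backward pages are queried in reverse order
		for i, j := 0, len(rows)-1; i < j; i, j = i+1, j-1 {
			rows[i], rows[j] = rows[j], rows[i]
		}
	}
	return rows, hasNext, hasPrev
}

func main() {
	first := 2
	nodes, hasNext, _ := page([]int{10, 20, 30}, &first, nil) // three rows fetched for first=2
	fmt.Println(nodes, hasNext)                               // [10 20] true
}

Fetching one extra row is what lets the generated code report whether another page exists without issuing a second COUNT query.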
type BuildPaginateOption func(*buildPager) error @@ -1450,12 +1446,13 @@ func WithBuildFilter(filter func(*BuildQuery) (*BuildQuery, error)) BuildPaginat } type buildPager struct { - order *BuildOrder - filter func(*BuildQuery) (*BuildQuery, error) + reverse bool + order *BuildOrder + filter func(*BuildQuery) (*BuildQuery, error) } -func newBuildPager(opts []BuildPaginateOption) (*buildPager, error) { - pager := &buildPager{} +func newBuildPager(opts []BuildPaginateOption, reverse bool) (*buildPager, error) { + pager := &buildPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -1478,28 +1475,48 @@ func (p *buildPager) toCursor(b *Build) Cursor { return p.order.Field.toCursor(b) } -func (p *buildPager) applyCursors(query *BuildQuery, after, before *Cursor) *BuildQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultBuildOrder.Field.field, - ) { +func (p *buildPager) applyCursors(query *BuildQuery, after, before *Cursor) (*BuildQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultBuildOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *buildPager) applyOrder(query *BuildQuery, reverse bool) *BuildQuery { +func (p *buildPager) applyOrder(query *BuildQuery) *BuildQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultBuildOrder.Field { - query = query.Order(direction.orderFunc(DefaultBuildOrder.Field.field)) + query = query.Order(DefaultBuildOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *buildPager) orderExpr(query *BuildQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultBuildOrder.Field { + b.Comma().Ident(DefaultBuildOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Build. 
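With the pager now carrying a reverse flag, a last/before request flips the order direction for the SQL query, and the connection builder restores the requested order when edges are assembled. A backward-pagination sketch; WithBuildOrder is assumed to exist by the same pattern as the other With*Order options in this file, and the descending direction is illustrative:

package examples

import (
	"context"

	"entgo.io/contrib/entgql"
	"github.com/gen0cide/laforge/ent"
)

// lastBuilds returns the last n build edges before the given cursor under a
// descending ID ordering.
func lastBuilds(ctx context.Context, client *ent.Client, before *ent.Cursor, n int) (*ent.BuildConnection, error) {
	return client.Build.Query().Paginate(ctx, nil, nil, before, &n,
		ent.WithBuildOrder(&ent.BuildOrder{
			Direction: entgql.OrderDirectionDesc,
			Field:     ent.DefaultBuildOrder.Field,
		}),
	)
}

Reversing in SQL keeps LIMIT efficient for last-N requests; only the already-trimmed page is reversed in memory.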
func (b *BuildQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -1508,98 +1525,54 @@ func (b *BuildQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newBuildPager(opts) + pager, err := newBuildPager(opts, last != nil) if err != nil { return nil, err } - if b, err = pager.applyFilter(b); err != nil { return nil, err } - conn := &BuildConnection{Edges: []*BuildEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := b.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = b.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := b.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - b = pager.applyCursors(b, after, before) - b = pager.applyOrder(b, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if b, err = pager.applyCursors(b, after, before); err != nil { + return nil, err } - if limit > 0 { - b = b.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + b.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - b = b.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := b.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + b = pager.applyOrder(b) nodes, err := b.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *Build - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Build { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Build { - return nodes[i] - } - } - - conn.Edges = make([]*BuildEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &BuildEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // BuildOrderField defines the ordering field of Build. type BuildOrderField struct { - field string + // Value extracts the ordering value from the given Build. 
+ Value func(*Build) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) build.OrderOption toCursor func(*Build) Cursor } @@ -1611,9 +1584,13 @@ type BuildOrder struct { // DefaultBuildOrder is the default ordering of Build. var DefaultBuildOrder = &BuildOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &BuildOrderField{ - field: build.FieldID, + Value: func(b *Build) (ent.Value, error) { + return b.ID, nil + }, + column: build.FieldID, + toTerm: build.ByID, toCursor: func(b *Build) Cursor { return Cursor{ID: b.ID} }, @@ -1644,8 +1621,46 @@ type BuildCommitConnection struct { TotalCount int `json:"totalCount"` } +func (c *BuildCommitConnection) build(nodes []*BuildCommit, pager *buildcommitPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *BuildCommit + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *BuildCommit { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *BuildCommit { + return nodes[i] + } + } + c.Edges = make([]*BuildCommitEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &BuildCommitEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // BuildCommitPaginateOption enables pagination customization. -type BuildCommitPaginateOption func(*buildCommitPager) error +type BuildCommitPaginateOption func(*buildcommitPager) error // WithBuildCommitOrder configures pagination ordering. func WithBuildCommitOrder(order *BuildCommitOrder) BuildCommitPaginateOption { @@ -1653,7 +1668,7 @@ func WithBuildCommitOrder(order *BuildCommitOrder) BuildCommitPaginateOption { order = DefaultBuildCommitOrder } o := *order - return func(pager *buildCommitPager) error { + return func(pager *buildcommitPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -1667,7 +1682,7 @@ func WithBuildCommitOrder(order *BuildCommitOrder) BuildCommitPaginateOption { // WithBuildCommitFilter configures pagination filter. 
func WithBuildCommitFilter(filter func(*BuildCommitQuery) (*BuildCommitQuery, error)) BuildCommitPaginateOption { - return func(pager *buildCommitPager) error { + return func(pager *buildcommitPager) error { if filter == nil { return errors.New("BuildCommitQuery filter cannot be nil") } @@ -1676,13 +1691,14 @@ func WithBuildCommitFilter(filter func(*BuildCommitQuery) (*BuildCommitQuery, er } } -type buildCommitPager struct { - order *BuildCommitOrder - filter func(*BuildCommitQuery) (*BuildCommitQuery, error) +type buildcommitPager struct { + reverse bool + order *BuildCommitOrder + filter func(*BuildCommitQuery) (*BuildCommitQuery, error) } -func newBuildCommitPager(opts []BuildCommitPaginateOption) (*buildCommitPager, error) { - pager := &buildCommitPager{} +func newBuildCommitPager(opts []BuildCommitPaginateOption, reverse bool) (*buildcommitPager, error) { + pager := &buildcommitPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -1694,39 +1710,59 @@ func newBuildCommitPager(opts []BuildCommitPaginateOption) (*buildCommitPager, e return pager, nil } -func (p *buildCommitPager) applyFilter(query *BuildCommitQuery) (*BuildCommitQuery, error) { +func (p *buildcommitPager) applyFilter(query *BuildCommitQuery) (*BuildCommitQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *buildCommitPager) toCursor(bc *BuildCommit) Cursor { +func (p *buildcommitPager) toCursor(bc *BuildCommit) Cursor { return p.order.Field.toCursor(bc) } -func (p *buildCommitPager) applyCursors(query *BuildCommitQuery, after, before *Cursor) *BuildCommitQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultBuildCommitOrder.Field.field, - ) { +func (p *buildcommitPager) applyCursors(query *BuildCommitQuery, after, before *Cursor) (*BuildCommitQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultBuildCommitOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *buildCommitPager) applyOrder(query *BuildCommitQuery, reverse bool) *BuildCommitQuery { +func (p *buildcommitPager) applyOrder(query *BuildCommitQuery) *BuildCommitQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultBuildCommitOrder.Field { - query = query.Order(direction.orderFunc(DefaultBuildCommitOrder.Field.field)) + query = query.Order(DefaultBuildCommitOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *buildcommitPager) orderExpr(query *BuildCommitQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultBuildCommitOrder.Field { + b.Comma().Ident(DefaultBuildCommitOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and 
returns a relay based cursor connection to BuildCommit. func (bc *BuildCommitQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -1735,98 +1771,54 @@ func (bc *BuildCommitQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newBuildCommitPager(opts) + pager, err := newBuildCommitPager(opts, last != nil) if err != nil { return nil, err } - if bc, err = pager.applyFilter(bc); err != nil { return nil, err } - conn := &BuildCommitConnection{Edges: []*BuildCommitEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := bc.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = bc.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := bc.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - bc = pager.applyCursors(bc, after, before) - bc = pager.applyOrder(bc, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - bc = bc.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - bc = bc.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := bc.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if bc, err = pager.applyCursors(bc, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *BuildCommit - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *BuildCommit { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *BuildCommit { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + bc.Limit(limit) } - - conn.Edges = make([]*BuildCommitEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &BuildCommitEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := bc.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + bc = pager.applyOrder(bc) + nodes, err := bc.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // BuildCommitOrderField defines the ordering field of 
BuildCommit. type BuildCommitOrderField struct { - field string + // Value extracts the ordering value from the given BuildCommit. + Value func(*BuildCommit) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) buildcommit.OrderOption toCursor func(*BuildCommit) Cursor } @@ -1838,9 +1830,13 @@ type BuildCommitOrder struct { // DefaultBuildCommitOrder is the default ordering of BuildCommit. var DefaultBuildCommitOrder = &BuildCommitOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &BuildCommitOrderField{ - field: buildcommit.FieldID, + Value: func(bc *BuildCommit) (ent.Value, error) { + return bc.ID, nil + }, + column: buildcommit.FieldID, + toTerm: buildcommit.ByID, toCursor: func(bc *BuildCommit) Cursor { return Cursor{ID: bc.ID} }, @@ -1871,6 +1867,44 @@ type CommandConnection struct { TotalCount int `json:"totalCount"` } +func (c *CommandConnection) build(nodes []*Command, pager *commandPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Command + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Command { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Command { + return nodes[i] + } + } + c.Edges = make([]*CommandEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &CommandEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // CommandPaginateOption enables pagination customization. 
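applyCursors now delegates to entgql.CursorsPredicate, which turns after/before cursors into keyset WHERE clauses over the ordering column with the ID column as tie-breaker. A stand-in over an in-memory slice that sketches only the comparison semantics, not the library implementation:

package main

import "fmt"

// keysetFilter keeps the ids that fall strictly between the after and before
// cursors for the given direction, mirroring what the generated cursor
// predicates express in SQL.
func keysetFilter(ids []int, after, before *int, asc bool) []int {
	var keep []int
	for _, id := range ids {
		if after != nil && ((asc && id <= *after) || (!asc && id >= *after)) {
			continue
		}
		if before != nil && ((asc && id >= *before) || (!asc && id <= *before)) {
			continue
		}
		keep = append(keep, id)
	}
	return keep
}

func main() {
	after, before := 2, 5
	fmt.Println(keysetFilter([]int{1, 2, 3, 4, 5, 6}, &after, &before, true)) // [3 4]
}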
type CommandPaginateOption func(*commandPager) error @@ -1904,12 +1938,13 @@ func WithCommandFilter(filter func(*CommandQuery) (*CommandQuery, error)) Comman } type commandPager struct { - order *CommandOrder - filter func(*CommandQuery) (*CommandQuery, error) + reverse bool + order *CommandOrder + filter func(*CommandQuery) (*CommandQuery, error) } -func newCommandPager(opts []CommandPaginateOption) (*commandPager, error) { - pager := &commandPager{} +func newCommandPager(opts []CommandPaginateOption, reverse bool) (*commandPager, error) { + pager := &commandPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -1932,28 +1967,48 @@ func (p *commandPager) toCursor(c *Command) Cursor { return p.order.Field.toCursor(c) } -func (p *commandPager) applyCursors(query *CommandQuery, after, before *Cursor) *CommandQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultCommandOrder.Field.field, - ) { +func (p *commandPager) applyCursors(query *CommandQuery, after, before *Cursor) (*CommandQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultCommandOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *commandPager) applyOrder(query *CommandQuery, reverse bool) *CommandQuery { +func (p *commandPager) applyOrder(query *CommandQuery) *CommandQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultCommandOrder.Field { - query = query.Order(direction.orderFunc(DefaultCommandOrder.Field.field)) + query = query.Order(DefaultCommandOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *commandPager) orderExpr(query *CommandQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultCommandOrder.Field { + b.Comma().Ident(DefaultCommandOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Command. 
func (c *CommandQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -1962,98 +2017,54 @@ func (c *CommandQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newCommandPager(opts) + pager, err := newCommandPager(opts, last != nil) if err != nil { return nil, err } - if c, err = pager.applyFilter(c); err != nil { return nil, err } - conn := &CommandConnection{Edges: []*CommandEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := c.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = c.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := c.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count - } - - c = pager.applyCursors(c, after, before) - c = pager.applyOrder(c, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 } - if limit > 0 { - c = c.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - c = c.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := c.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if c, err = pager.applyCursors(c, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Command - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Command { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Command { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + c.Limit(limit) } - - conn.Edges = make([]*CommandEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &CommandEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := c.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + c = pager.applyOrder(c) + nodes, err := c.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // CommandOrderField defines the ordering field of Command. type CommandOrderField struct { - field string + // Value extracts the ordering value from the given Command. 
+ Value func(*Command) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) command.OrderOption toCursor func(*Command) Cursor } @@ -2065,9 +2076,13 @@ type CommandOrder struct { // DefaultCommandOrder is the default ordering of Command. var DefaultCommandOrder = &CommandOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &CommandOrderField{ - field: command.FieldID, + Value: func(c *Command) (ent.Value, error) { + return c.ID, nil + }, + column: command.FieldID, + toTerm: command.ByID, toCursor: func(c *Command) Cursor { return Cursor{ID: c.ID} }, @@ -2098,6 +2113,44 @@ type CompetitionConnection struct { TotalCount int `json:"totalCount"` } +func (c *CompetitionConnection) build(nodes []*Competition, pager *competitionPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Competition + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Competition { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Competition { + return nodes[i] + } + } + c.Edges = make([]*CompetitionEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &CompetitionEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // CompetitionPaginateOption enables pagination customization. 
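The inline limit computation that each Paginate used to carry is now the shared paginateLimit helper: first+1 or last+1 so the extra row can drive HasNextPage/HasPreviousPage, and 0 when neither bound is set. A stand-in with the behavior implied by the call sites above; the real helper lives elsewhere in this generated file:

package examples

// pageLimit reproduces the first+1 / last+1 over-fetch rule used by the
// generated Paginate methods; 0 means no explicit limit is applied.
func pageLimit(first, last *int) int {
	switch {
	case first != nil:
		return *first + 1
	case last != nil:
		return *last + 1
	default:
		return 0
	}
}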
type CompetitionPaginateOption func(*competitionPager) error @@ -2131,12 +2184,13 @@ func WithCompetitionFilter(filter func(*CompetitionQuery) (*CompetitionQuery, er } type competitionPager struct { - order *CompetitionOrder - filter func(*CompetitionQuery) (*CompetitionQuery, error) + reverse bool + order *CompetitionOrder + filter func(*CompetitionQuery) (*CompetitionQuery, error) } -func newCompetitionPager(opts []CompetitionPaginateOption) (*competitionPager, error) { - pager := &competitionPager{} +func newCompetitionPager(opts []CompetitionPaginateOption, reverse bool) (*competitionPager, error) { + pager := &competitionPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -2159,28 +2213,48 @@ func (p *competitionPager) toCursor(c *Competition) Cursor { return p.order.Field.toCursor(c) } -func (p *competitionPager) applyCursors(query *CompetitionQuery, after, before *Cursor) *CompetitionQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultCompetitionOrder.Field.field, - ) { +func (p *competitionPager) applyCursors(query *CompetitionQuery, after, before *Cursor) (*CompetitionQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultCompetitionOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *competitionPager) applyOrder(query *CompetitionQuery, reverse bool) *CompetitionQuery { +func (p *competitionPager) applyOrder(query *CompetitionQuery) *CompetitionQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultCompetitionOrder.Field { - query = query.Order(direction.orderFunc(DefaultCompetitionOrder.Field.field)) + query = query.Order(DefaultCompetitionOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *competitionPager) orderExpr(query *CompetitionQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultCompetitionOrder.Field { + b.Comma().Ident(DefaultCompetitionOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Competition. 
func (c *CompetitionQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -2189,100 +2263,56 @@ func (c *CompetitionQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newCompetitionPager(opts) + pager, err := newCompetitionPager(opts, last != nil) if err != nil { return nil, err } - if c, err = pager.applyFilter(c); err != nil { return nil, err } - conn := &CompetitionConnection{Edges: []*CompetitionEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := c.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = c.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := c.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - c = pager.applyCursors(c, after, before) - c = pager.applyOrder(c, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if c, err = pager.applyCursors(c, after, before); err != nil { + return nil, err } - if limit > 0 { - c = c.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + c.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - c = c.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := c.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + c = pager.applyOrder(c) nodes, err := c.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *Competition - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Competition { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Competition { - return nodes[i] - } - } - - conn.Edges = make([]*CompetitionEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &CompetitionEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} - -// CompetitionOrderField defines the ordering field of Competition. 
-type CompetitionOrderField struct { - field string - toCursor func(*Competition) Cursor -} +// CompetitionOrderField defines the ordering field of Competition. +type CompetitionOrderField struct { + // Value extracts the ordering value from the given Competition. + Value func(*Competition) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) competition.OrderOption + toCursor func(*Competition) Cursor +} // CompetitionOrder defines the ordering of Competition. type CompetitionOrder struct { @@ -2292,9 +2322,13 @@ type CompetitionOrder struct { // DefaultCompetitionOrder is the default ordering of Competition. var DefaultCompetitionOrder = &CompetitionOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &CompetitionOrderField{ - field: competition.FieldID, + Value: func(c *Competition) (ent.Value, error) { + return c.ID, nil + }, + column: competition.FieldID, + toTerm: competition.ByID, toCursor: func(c *Competition) Cursor { return Cursor{ID: c.ID} }, @@ -2325,8 +2359,46 @@ type DNSConnection struct { TotalCount int `json:"totalCount"` } +func (c *DNSConnection) build(nodes []*DNS, pager *dnsPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *DNS + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *DNS { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *DNS { + return nodes[i] + } + } + c.Edges = make([]*DNSEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &DNSEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // DNSPaginateOption enables pagination customization. -type DNSPaginateOption func(*dNSPager) error +type DNSPaginateOption func(*dnsPager) error // WithDNSOrder configures pagination ordering. func WithDNSOrder(order *DNSOrder) DNSPaginateOption { @@ -2334,7 +2406,7 @@ func WithDNSOrder(order *DNSOrder) DNSPaginateOption { order = DefaultDNSOrder } o := *order - return func(pager *dNSPager) error { + return func(pager *dnsPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -2348,7 +2420,7 @@ func WithDNSOrder(order *DNSOrder) DNSPaginateOption { // WithDNSFilter configures pagination filter. 
func WithDNSFilter(filter func(*DNSQuery) (*DNSQuery, error)) DNSPaginateOption { - return func(pager *dNSPager) error { + return func(pager *dnsPager) error { if filter == nil { return errors.New("DNSQuery filter cannot be nil") } @@ -2357,13 +2429,14 @@ func WithDNSFilter(filter func(*DNSQuery) (*DNSQuery, error)) DNSPaginateOption } } -type dNSPager struct { - order *DNSOrder - filter func(*DNSQuery) (*DNSQuery, error) +type dnsPager struct { + reverse bool + order *DNSOrder + filter func(*DNSQuery) (*DNSQuery, error) } -func newDNSPager(opts []DNSPaginateOption) (*dNSPager, error) { - pager := &dNSPager{} +func newDNSPager(opts []DNSPaginateOption, reverse bool) (*dnsPager, error) { + pager := &dnsPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -2375,39 +2448,59 @@ func newDNSPager(opts []DNSPaginateOption) (*dNSPager, error) { return pager, nil } -func (p *dNSPager) applyFilter(query *DNSQuery) (*DNSQuery, error) { +func (p *dnsPager) applyFilter(query *DNSQuery) (*DNSQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *dNSPager) toCursor(d *DNS) Cursor { +func (p *dnsPager) toCursor(d *DNS) Cursor { return p.order.Field.toCursor(d) } -func (p *dNSPager) applyCursors(query *DNSQuery, after, before *Cursor) *DNSQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultDNSOrder.Field.field, - ) { +func (p *dnsPager) applyCursors(query *DNSQuery, after, before *Cursor) (*DNSQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultDNSOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *dNSPager) applyOrder(query *DNSQuery, reverse bool) *DNSQuery { +func (p *dnsPager) applyOrder(query *DNSQuery) *DNSQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultDNSOrder.Field { - query = query.Order(direction.orderFunc(DefaultDNSOrder.Field.field)) + query = query.Order(DefaultDNSOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *dnsPager) orderExpr(query *DNSQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultDNSOrder.Field { + b.Comma().Ident(DefaultDNSOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to DNS. 
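Because the default ordering is by ID, the toCursor funcs in this file encode nothing but the node ID, so pagination can be resumed from a known UUID without a previously serialized cursor. A sketch; the uuid-typed ID and the google/uuid import are assumptions based on the schema's uuid fields:

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// dnsAfter resumes DNS pagination immediately after a node whose ID is known.
func dnsAfter(ctx context.Context, client *ent.Client, knownID uuid.UUID, pageSize int) (*ent.DNSConnection, error) {
	after := &ent.Cursor{ID: knownID}
	return client.DNS.Query().Paginate(ctx, after, &pageSize, nil, nil)
}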
func (d *DNSQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -2416,98 +2509,54 @@ func (d *DNSQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newDNSPager(opts) + pager, err := newDNSPager(opts, last != nil) if err != nil { return nil, err } - if d, err = pager.applyFilter(d); err != nil { return nil, err } - conn := &DNSConnection{Edges: []*DNSEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := d.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = d.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := d.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - d = pager.applyCursors(d, after, before) - d = pager.applyOrder(d, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - d = d.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - d = d.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := d.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if d, err = pager.applyCursors(d, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *DNS - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *DNS { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *DNS { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + d.Limit(limit) } - - conn.Edges = make([]*DNSEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &DNSEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := d.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + d = pager.applyOrder(d) + nodes, err := d.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // DNSOrderField defines the ordering field of DNS. type DNSOrderField struct { - field string + // Value extracts the ordering value from the given DNS. + Value func(*DNS) (ent.Value, error) + column string // field or computed. 
+ toTerm func(...sql.OrderTermOption) dns.OrderOption toCursor func(*DNS) Cursor } @@ -2519,9 +2568,13 @@ type DNSOrder struct { // DefaultDNSOrder is the default ordering of DNS. var DefaultDNSOrder = &DNSOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &DNSOrderField{ - field: dns.FieldID, + Value: func(d *DNS) (ent.Value, error) { + return d.ID, nil + }, + column: dns.FieldID, + toTerm: dns.ByID, toCursor: func(d *DNS) Cursor { return Cursor{ID: d.ID} }, @@ -2552,8 +2605,46 @@ type DNSRecordConnection struct { TotalCount int `json:"totalCount"` } +func (c *DNSRecordConnection) build(nodes []*DNSRecord, pager *dnsrecordPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *DNSRecord + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *DNSRecord { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *DNSRecord { + return nodes[i] + } + } + c.Edges = make([]*DNSRecordEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &DNSRecordEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // DNSRecordPaginateOption enables pagination customization. -type DNSRecordPaginateOption func(*dNSRecordPager) error +type DNSRecordPaginateOption func(*dnsrecordPager) error // WithDNSRecordOrder configures pagination ordering. func WithDNSRecordOrder(order *DNSRecordOrder) DNSRecordPaginateOption { @@ -2561,7 +2652,7 @@ func WithDNSRecordOrder(order *DNSRecordOrder) DNSRecordPaginateOption { order = DefaultDNSRecordOrder } o := *order - return func(pager *dNSRecordPager) error { + return func(pager *dnsrecordPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -2575,7 +2666,7 @@ func WithDNSRecordOrder(order *DNSRecordOrder) DNSRecordPaginateOption { // WithDNSRecordFilter configures pagination filter. 
func WithDNSRecordFilter(filter func(*DNSRecordQuery) (*DNSRecordQuery, error)) DNSRecordPaginateOption { - return func(pager *dNSRecordPager) error { + return func(pager *dnsrecordPager) error { if filter == nil { return errors.New("DNSRecordQuery filter cannot be nil") } @@ -2584,13 +2675,14 @@ func WithDNSRecordFilter(filter func(*DNSRecordQuery) (*DNSRecordQuery, error)) } } -type dNSRecordPager struct { - order *DNSRecordOrder - filter func(*DNSRecordQuery) (*DNSRecordQuery, error) +type dnsrecordPager struct { + reverse bool + order *DNSRecordOrder + filter func(*DNSRecordQuery) (*DNSRecordQuery, error) } -func newDNSRecordPager(opts []DNSRecordPaginateOption) (*dNSRecordPager, error) { - pager := &dNSRecordPager{} +func newDNSRecordPager(opts []DNSRecordPaginateOption, reverse bool) (*dnsrecordPager, error) { + pager := &dnsrecordPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -2602,39 +2694,59 @@ func newDNSRecordPager(opts []DNSRecordPaginateOption) (*dNSRecordPager, error) return pager, nil } -func (p *dNSRecordPager) applyFilter(query *DNSRecordQuery) (*DNSRecordQuery, error) { +func (p *dnsrecordPager) applyFilter(query *DNSRecordQuery) (*DNSRecordQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *dNSRecordPager) toCursor(dr *DNSRecord) Cursor { +func (p *dnsrecordPager) toCursor(dr *DNSRecord) Cursor { return p.order.Field.toCursor(dr) } -func (p *dNSRecordPager) applyCursors(query *DNSRecordQuery, after, before *Cursor) *DNSRecordQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultDNSRecordOrder.Field.field, - ) { +func (p *dnsrecordPager) applyCursors(query *DNSRecordQuery, after, before *Cursor) (*DNSRecordQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultDNSRecordOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *dNSRecordPager) applyOrder(query *DNSRecordQuery, reverse bool) *DNSRecordQuery { +func (p *dnsrecordPager) applyOrder(query *DNSRecordQuery) *DNSRecordQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultDNSRecordOrder.Field { - query = query.Order(direction.orderFunc(DefaultDNSRecordOrder.Field.field)) + query = query.Order(DefaultDNSRecordOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *dnsrecordPager) orderExpr(query *DNSRecordQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultDNSRecordOrder.Field { + b.Comma().Ident(DefaultDNSRecordOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to DNSRecord. 
func (dr *DNSRecordQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -2643,98 +2755,54 @@ func (dr *DNSRecordQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newDNSRecordPager(opts) + pager, err := newDNSRecordPager(opts, last != nil) if err != nil { return nil, err } - if dr, err = pager.applyFilter(dr); err != nil { return nil, err } - conn := &DNSRecordConnection{Edges: []*DNSRecordEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := dr.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = dr.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := dr.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - dr = pager.applyCursors(dr, after, before) - dr = pager.applyOrder(dr, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - dr = dr.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - dr = dr.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := dr.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if dr, err = pager.applyCursors(dr, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *DNSRecord - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *DNSRecord { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *DNSRecord { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + dr.Limit(limit) } - - conn.Edges = make([]*DNSRecordEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &DNSRecordEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := dr.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + dr = pager.applyOrder(dr) + nodes, err := dr.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // DNSRecordOrderField defines the ordering field of DNSRecord. 
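PageInfo.EndCursor from one response is the natural after value for the next request, so draining a connection is a simple loop. A sketch assuming an *ent.Client named client and an illustrative page size of 100:

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// allDNSRecords walks every page of DNS records, feeding EndCursor back in as
// the next `after` cursor until HasNextPage reports false.
func allDNSRecords(ctx context.Context, client *ent.Client) ([]*ent.DNSRecord, error) {
	var (
		out   []*ent.DNSRecord
		after *ent.Cursor
	)
	size := 100
	for {
		conn, err := client.DNSRecord.Query().Paginate(ctx, after, &size, nil, nil)
		if err != nil {
			return nil, err
		}
		for _, e := range conn.Edges {
			out = append(out, e.Node)
		}
		if !conn.PageInfo.HasNextPage {
			return out, nil
		}
		after = conn.PageInfo.EndCursor
	}
}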
type DNSRecordOrderField struct { - field string + // Value extracts the ordering value from the given DNSRecord. + Value func(*DNSRecord) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) dnsrecord.OrderOption toCursor func(*DNSRecord) Cursor } @@ -2746,9 +2814,13 @@ type DNSRecordOrder struct { // DefaultDNSRecordOrder is the default ordering of DNSRecord. var DefaultDNSRecordOrder = &DNSRecordOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &DNSRecordOrderField{ - field: dnsrecord.FieldID, + Value: func(dr *DNSRecord) (ent.Value, error) { + return dr.ID, nil + }, + column: dnsrecord.FieldID, + toTerm: dnsrecord.ByID, toCursor: func(dr *DNSRecord) Cursor { return Cursor{ID: dr.ID} }, @@ -2779,6 +2851,44 @@ type DiskConnection struct { TotalCount int `json:"totalCount"` } +func (c *DiskConnection) build(nodes []*Disk, pager *diskPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Disk + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Disk { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Disk { + return nodes[i] + } + } + c.Edges = make([]*DiskEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &DiskEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // DiskPaginateOption enables pagination customization. 
type DiskPaginateOption func(*diskPager) error @@ -2812,12 +2922,13 @@ func WithDiskFilter(filter func(*DiskQuery) (*DiskQuery, error)) DiskPaginateOpt } type diskPager struct { - order *DiskOrder - filter func(*DiskQuery) (*DiskQuery, error) + reverse bool + order *DiskOrder + filter func(*DiskQuery) (*DiskQuery, error) } -func newDiskPager(opts []DiskPaginateOption) (*diskPager, error) { - pager := &diskPager{} +func newDiskPager(opts []DiskPaginateOption, reverse bool) (*diskPager, error) { + pager := &diskPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -2840,28 +2951,48 @@ func (p *diskPager) toCursor(d *Disk) Cursor { return p.order.Field.toCursor(d) } -func (p *diskPager) applyCursors(query *DiskQuery, after, before *Cursor) *DiskQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultDiskOrder.Field.field, - ) { +func (p *diskPager) applyCursors(query *DiskQuery, after, before *Cursor) (*DiskQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultDiskOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *diskPager) applyOrder(query *DiskQuery, reverse bool) *DiskQuery { +func (p *diskPager) applyOrder(query *DiskQuery) *DiskQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultDiskOrder.Field { - query = query.Order(direction.orderFunc(DefaultDiskOrder.Field.field)) + query = query.Order(DefaultDiskOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *diskPager) orderExpr(query *DiskQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultDiskOrder.Field { + b.Comma().Ident(DefaultDiskOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Disk. 
func (d *DiskQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -2870,100 +3001,56 @@ func (d *DiskQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newDiskPager(opts) + pager, err := newDiskPager(opts, last != nil) if err != nil { return nil, err } - if d, err = pager.applyFilter(d); err != nil { return nil, err } - conn := &DiskConnection{Edges: []*DiskEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := d.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = d.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := d.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - d = pager.applyCursors(d, after, before) - d = pager.applyOrder(d, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if d, err = pager.applyCursors(d, after, before); err != nil { + return nil, err } - if limit > 0 { - d = d.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + d.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - d = d.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := d.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + d = pager.applyOrder(d) nodes, err := d.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *Disk - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Disk { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Disk { - return nodes[i] - } - } - - conn.Edges = make([]*DiskEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &DiskEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} - -// DiskOrderField defines the ordering field of Disk. -type DiskOrderField struct { - field string - toCursor func(*Disk) Cursor -} +// DiskOrderField defines the ordering field of Disk. 
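For reference, a rough caller-side sketch of the rewritten Paginate above; the resolvers package name, the client variable, and the no-op filter are assumptions, only the Paginate signature and option helpers come from this diff:

package resolvers // hypothetical

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// listDisks pages forward: first/after drive the pagination.
func listDisks(ctx context.Context, client *ent.Client, after *ent.Cursor, first int) (*ent.DiskConnection, error) {
	return client.Disk.Query().
		Paginate(ctx, after, &first, nil, nil,
			ent.WithDiskOrder(nil), // nil falls back to DefaultDiskOrder
			ent.WithDiskFilter(func(q *ent.DiskQuery) (*ent.DiskQuery, error) {
				return q, nil // no-op filter, shown only for shape
			}),
		)
}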
+type DiskOrderField struct { + // Value extracts the ordering value from the given Disk. + Value func(*Disk) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) disk.OrderOption + toCursor func(*Disk) Cursor +} // DiskOrder defines the ordering of Disk. type DiskOrder struct { @@ -2973,9 +3060,13 @@ type DiskOrder struct { // DefaultDiskOrder is the default ordering of Disk. var DefaultDiskOrder = &DiskOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &DiskOrderField{ - field: disk.FieldID, + Value: func(d *Disk) (ent.Value, error) { + return d.ID, nil + }, + column: disk.FieldID, + toTerm: disk.ByID, toCursor: func(d *Disk) Cursor { return Cursor{ID: d.ID} }, @@ -3006,6 +3097,44 @@ type EnvironmentConnection struct { TotalCount int `json:"totalCount"` } +func (c *EnvironmentConnection) build(nodes []*Environment, pager *environmentPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Environment + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Environment { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Environment { + return nodes[i] + } + } + c.Edges = make([]*EnvironmentEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &EnvironmentEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // EnvironmentPaginateOption enables pagination customization. 
type EnvironmentPaginateOption func(*environmentPager) error @@ -3039,12 +3168,13 @@ func WithEnvironmentFilter(filter func(*EnvironmentQuery) (*EnvironmentQuery, er } type environmentPager struct { - order *EnvironmentOrder - filter func(*EnvironmentQuery) (*EnvironmentQuery, error) + reverse bool + order *EnvironmentOrder + filter func(*EnvironmentQuery) (*EnvironmentQuery, error) } -func newEnvironmentPager(opts []EnvironmentPaginateOption) (*environmentPager, error) { - pager := &environmentPager{} +func newEnvironmentPager(opts []EnvironmentPaginateOption, reverse bool) (*environmentPager, error) { + pager := &environmentPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -3067,28 +3197,48 @@ func (p *environmentPager) toCursor(e *Environment) Cursor { return p.order.Field.toCursor(e) } -func (p *environmentPager) applyCursors(query *EnvironmentQuery, after, before *Cursor) *EnvironmentQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultEnvironmentOrder.Field.field, - ) { +func (p *environmentPager) applyCursors(query *EnvironmentQuery, after, before *Cursor) (*EnvironmentQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultEnvironmentOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *environmentPager) applyOrder(query *EnvironmentQuery, reverse bool) *EnvironmentQuery { +func (p *environmentPager) applyOrder(query *EnvironmentQuery) *EnvironmentQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultEnvironmentOrder.Field { - query = query.Order(direction.orderFunc(DefaultEnvironmentOrder.Field.field)) + query = query.Order(DefaultEnvironmentOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *environmentPager) orderExpr(query *EnvironmentQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultEnvironmentOrder.Field { + b.Comma().Ident(DefaultEnvironmentOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Environment. 
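applyCursors above now delegates the WHERE clauses to entgql.CursorsPredicate. For the default ascending ID ordering, the effect is roughly the hand-written equivalent below (a sketch of the semantics only; the real helper also handles custom order columns and, when p.reverse is set for last-based pagination, flips the comparisons along with the direction):

// cursorSketch approximates what the cursor predicates do for the default
// ascending ID order: "after" excludes the cursor row and everything before
// it, "before" excludes the cursor row and everything after it.
func cursorSketch(query *EnvironmentQuery, after, before *Cursor) *EnvironmentQuery {
	if after != nil {
		query = query.Where(environment.IDGT(after.ID))
	}
	if before != nil {
		query = query.Where(environment.IDLT(before.ID))
	}
	return query
}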
func (e *EnvironmentQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -3097,98 +3247,54 @@ func (e *EnvironmentQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newEnvironmentPager(opts) + pager, err := newEnvironmentPager(opts, last != nil) if err != nil { return nil, err } - if e, err = pager.applyFilter(e); err != nil { return nil, err } - conn := &EnvironmentConnection{Edges: []*EnvironmentEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := e.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = e.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := e.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - e = pager.applyCursors(e, after, before) - e = pager.applyOrder(e, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - e = e.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - e = e.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := e.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if e, err = pager.applyCursors(e, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Environment - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Environment { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Environment { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + e.Limit(limit) } - - conn.Edges = make([]*EnvironmentEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &EnvironmentEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := e.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + e = pager.applyOrder(e) + nodes, err := e.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // EnvironmentOrderField defines the ordering field of Environment. 
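The Paginate body above calls a paginateLimit helper that does not appear in this hunk; presumably it keeps the old "fetch one extra row" behavior, along the lines of:

// Presumed shape of the shared helper: ask for one row more than requested so
// build() can tell whether another page exists, then trim that row off.
func paginateLimit(first, last *int) int {
	var limit int
	if first != nil {
		limit = *first + 1
	} else if last != nil {
		limit = *last + 1
	}
	return limit
}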
type EnvironmentOrderField struct { - field string + // Value extracts the ordering value from the given Environment. + Value func(*Environment) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) environment.OrderOption toCursor func(*Environment) Cursor } @@ -3200,9 +3306,13 @@ type EnvironmentOrder struct { // DefaultEnvironmentOrder is the default ordering of Environment. var DefaultEnvironmentOrder = &EnvironmentOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &EnvironmentOrderField{ - field: environment.FieldID, + Value: func(e *Environment) (ent.Value, error) { + return e.ID, nil + }, + column: environment.FieldID, + toTerm: environment.ByID, toCursor: func(e *Environment) Cursor { return Cursor{ID: e.ID} }, @@ -3233,8 +3343,46 @@ type FileDeleteConnection struct { TotalCount int `json:"totalCount"` } +func (c *FileDeleteConnection) build(nodes []*FileDelete, pager *filedeletePager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *FileDelete + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *FileDelete { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *FileDelete { + return nodes[i] + } + } + c.Edges = make([]*FileDeleteEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &FileDeleteEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // FileDeletePaginateOption enables pagination customization. -type FileDeletePaginateOption func(*fileDeletePager) error +type FileDeletePaginateOption func(*filedeletePager) error // WithFileDeleteOrder configures pagination ordering. func WithFileDeleteOrder(order *FileDeleteOrder) FileDeletePaginateOption { @@ -3242,7 +3390,7 @@ func WithFileDeleteOrder(order *FileDeleteOrder) FileDeletePaginateOption { order = DefaultFileDeleteOrder } o := *order - return func(pager *fileDeletePager) error { + return func(pager *filedeletePager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -3256,7 +3404,7 @@ func WithFileDeleteOrder(order *FileDeleteOrder) FileDeletePaginateOption { // WithFileDeleteFilter configures pagination filter. 
func WithFileDeleteFilter(filter func(*FileDeleteQuery) (*FileDeleteQuery, error)) FileDeletePaginateOption { - return func(pager *fileDeletePager) error { + return func(pager *filedeletePager) error { if filter == nil { return errors.New("FileDeleteQuery filter cannot be nil") } @@ -3265,13 +3413,14 @@ func WithFileDeleteFilter(filter func(*FileDeleteQuery) (*FileDeleteQuery, error } } -type fileDeletePager struct { - order *FileDeleteOrder - filter func(*FileDeleteQuery) (*FileDeleteQuery, error) +type filedeletePager struct { + reverse bool + order *FileDeleteOrder + filter func(*FileDeleteQuery) (*FileDeleteQuery, error) } -func newFileDeletePager(opts []FileDeletePaginateOption) (*fileDeletePager, error) { - pager := &fileDeletePager{} +func newFileDeletePager(opts []FileDeletePaginateOption, reverse bool) (*filedeletePager, error) { + pager := &filedeletePager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -3283,39 +3432,59 @@ func newFileDeletePager(opts []FileDeletePaginateOption) (*fileDeletePager, erro return pager, nil } -func (p *fileDeletePager) applyFilter(query *FileDeleteQuery) (*FileDeleteQuery, error) { +func (p *filedeletePager) applyFilter(query *FileDeleteQuery) (*FileDeleteQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *fileDeletePager) toCursor(fd *FileDelete) Cursor { +func (p *filedeletePager) toCursor(fd *FileDelete) Cursor { return p.order.Field.toCursor(fd) } -func (p *fileDeletePager) applyCursors(query *FileDeleteQuery, after, before *Cursor) *FileDeleteQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultFileDeleteOrder.Field.field, - ) { +func (p *filedeletePager) applyCursors(query *FileDeleteQuery, after, before *Cursor) (*FileDeleteQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultFileDeleteOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *fileDeletePager) applyOrder(query *FileDeleteQuery, reverse bool) *FileDeleteQuery { +func (p *filedeletePager) applyOrder(query *FileDeleteQuery) *FileDeleteQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultFileDeleteOrder.Field { - query = query.Order(direction.orderFunc(DefaultFileDeleteOrder.Field.field)) + query = query.Order(DefaultFileDeleteOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *filedeletePager) orderExpr(query *FileDeleteQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultFileDeleteOrder.Field { + b.Comma().Ident(DefaultFileDeleteOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to FileDelete. 
func (fd *FileDeleteQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -3324,98 +3493,54 @@ func (fd *FileDeleteQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newFileDeletePager(opts) + pager, err := newFileDeletePager(opts, last != nil) if err != nil { return nil, err } - if fd, err = pager.applyFilter(fd); err != nil { return nil, err } - conn := &FileDeleteConnection{Edges: []*FileDeleteEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := fd.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = fd.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := fd.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - fd = pager.applyCursors(fd, after, before) - fd = pager.applyOrder(fd, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - fd = fd.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - fd = fd.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := fd.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if fd, err = pager.applyCursors(fd, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *FileDelete - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *FileDelete { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *FileDelete { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + fd.Limit(limit) } - - conn.Edges = make([]*FileDeleteEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &FileDeleteEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := fd.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + fd = pager.applyOrder(fd) + nodes, err := fd.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // FileDeleteOrderField defines the ordering field of FileDelete. 
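When last/before are supplied, the pager above is constructed with reverse=true, so the ORDER BY is flipped at the SQL level and build() walks the rows back to front, which keeps the returned edges in the requested order. A rough caller-side sketch, assuming the same package and imports as the Disk example earlier:

// lastFileDeletes is a hypothetical backward page: the rows closest to
// `before`, still returned in ascending order.
func lastFileDeletes(ctx context.Context, client *ent.Client, before *ent.Cursor, last int) (*ent.FileDeleteConnection, error) {
	conn, err := client.FileDelete.Query().Paginate(ctx, nil, nil, before, &last)
	if err != nil {
		return nil, err
	}
	// HasPreviousPage comes from the extra row trimmed in build().
	return conn, nil
}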
type FileDeleteOrderField struct { - field string + // Value extracts the ordering value from the given FileDelete. + Value func(*FileDelete) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) filedelete.OrderOption toCursor func(*FileDelete) Cursor } @@ -3427,9 +3552,13 @@ type FileDeleteOrder struct { // DefaultFileDeleteOrder is the default ordering of FileDelete. var DefaultFileDeleteOrder = &FileDeleteOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &FileDeleteOrderField{ - field: filedelete.FieldID, + Value: func(fd *FileDelete) (ent.Value, error) { + return fd.ID, nil + }, + column: filedelete.FieldID, + toTerm: filedelete.ByID, toCursor: func(fd *FileDelete) Cursor { return Cursor{ID: fd.ID} }, @@ -3460,8 +3589,46 @@ type FileDownloadConnection struct { TotalCount int `json:"totalCount"` } +func (c *FileDownloadConnection) build(nodes []*FileDownload, pager *filedownloadPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *FileDownload + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *FileDownload { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *FileDownload { + return nodes[i] + } + } + c.Edges = make([]*FileDownloadEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &FileDownloadEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // FileDownloadPaginateOption enables pagination customization. -type FileDownloadPaginateOption func(*fileDownloadPager) error +type FileDownloadPaginateOption func(*filedownloadPager) error // WithFileDownloadOrder configures pagination ordering. func WithFileDownloadOrder(order *FileDownloadOrder) FileDownloadPaginateOption { @@ -3469,7 +3636,7 @@ func WithFileDownloadOrder(order *FileDownloadOrder) FileDownloadPaginateOption order = DefaultFileDownloadOrder } o := *order - return func(pager *fileDownloadPager) error { + return func(pager *filedownloadPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -3483,7 +3650,7 @@ func WithFileDownloadOrder(order *FileDownloadOrder) FileDownloadPaginateOption // WithFileDownloadFilter configures pagination filter. 
func WithFileDownloadFilter(filter func(*FileDownloadQuery) (*FileDownloadQuery, error)) FileDownloadPaginateOption { - return func(pager *fileDownloadPager) error { + return func(pager *filedownloadPager) error { if filter == nil { return errors.New("FileDownloadQuery filter cannot be nil") } @@ -3492,13 +3659,14 @@ func WithFileDownloadFilter(filter func(*FileDownloadQuery) (*FileDownloadQuery, } } -type fileDownloadPager struct { - order *FileDownloadOrder - filter func(*FileDownloadQuery) (*FileDownloadQuery, error) +type filedownloadPager struct { + reverse bool + order *FileDownloadOrder + filter func(*FileDownloadQuery) (*FileDownloadQuery, error) } -func newFileDownloadPager(opts []FileDownloadPaginateOption) (*fileDownloadPager, error) { - pager := &fileDownloadPager{} +func newFileDownloadPager(opts []FileDownloadPaginateOption, reverse bool) (*filedownloadPager, error) { + pager := &filedownloadPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -3510,39 +3678,59 @@ func newFileDownloadPager(opts []FileDownloadPaginateOption) (*fileDownloadPager return pager, nil } -func (p *fileDownloadPager) applyFilter(query *FileDownloadQuery) (*FileDownloadQuery, error) { +func (p *filedownloadPager) applyFilter(query *FileDownloadQuery) (*FileDownloadQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *fileDownloadPager) toCursor(fd *FileDownload) Cursor { +func (p *filedownloadPager) toCursor(fd *FileDownload) Cursor { return p.order.Field.toCursor(fd) } -func (p *fileDownloadPager) applyCursors(query *FileDownloadQuery, after, before *Cursor) *FileDownloadQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultFileDownloadOrder.Field.field, - ) { +func (p *filedownloadPager) applyCursors(query *FileDownloadQuery, after, before *Cursor) (*FileDownloadQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultFileDownloadOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *fileDownloadPager) applyOrder(query *FileDownloadQuery, reverse bool) *FileDownloadQuery { +func (p *filedownloadPager) applyOrder(query *FileDownloadQuery) *FileDownloadQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultFileDownloadOrder.Field { - query = query.Order(direction.orderFunc(DefaultFileDownloadOrder.Field.field)) + query = query.Order(DefaultFileDownloadOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *filedownloadPager) orderExpr(query *FileDownloadQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultFileDownloadOrder.Field { + 
b.Comma().Ident(DefaultFileDownloadOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to FileDownload. func (fd *FileDownloadQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -3551,100 +3739,56 @@ func (fd *FileDownloadQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newFileDownloadPager(opts) + pager, err := newFileDownloadPager(opts, last != nil) if err != nil { return nil, err } - if fd, err = pager.applyFilter(fd); err != nil { return nil, err } - conn := &FileDownloadConnection{Edges: []*FileDownloadEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := fd.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = fd.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := fd.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - fd = pager.applyCursors(fd, after, before) - fd = pager.applyOrder(fd, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if fd, err = pager.applyCursors(fd, after, before); err != nil { + return nil, err } - if limit > 0 { - fd = fd.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + fd.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - fd = fd.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := fd.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + fd = pager.applyOrder(fd) nodes, err := fd.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *FileDownload - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *FileDownload { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *FileDownload { - return nodes[i] - } - } - - conn.Edges = make([]*FileDownloadEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &FileDownloadEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = 
&conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} - -// FileDownloadOrderField defines the ordering field of FileDownload. -type FileDownloadOrderField struct { - field string - toCursor func(*FileDownload) Cursor -} +// FileDownloadOrderField defines the ordering field of FileDownload. +type FileDownloadOrderField struct { + // Value extracts the ordering value from the given FileDownload. + Value func(*FileDownload) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) filedownload.OrderOption + toCursor func(*FileDownload) Cursor +} // FileDownloadOrder defines the ordering of FileDownload. type FileDownloadOrder struct { @@ -3654,9 +3798,13 @@ type FileDownloadOrder struct { // DefaultFileDownloadOrder is the default ordering of FileDownload. var DefaultFileDownloadOrder = &FileDownloadOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &FileDownloadOrderField{ - field: filedownload.FieldID, + Value: func(fd *FileDownload) (ent.Value, error) { + return fd.ID, nil + }, + column: filedownload.FieldID, + toTerm: filedownload.ByID, toCursor: func(fd *FileDownload) Cursor { return Cursor{ID: fd.ID} }, @@ -3687,8 +3835,46 @@ type FileExtractConnection struct { TotalCount int `json:"totalCount"` } +func (c *FileExtractConnection) build(nodes []*FileExtract, pager *fileextractPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *FileExtract + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *FileExtract { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *FileExtract { + return nodes[i] + } + } + c.Edges = make([]*FileExtractEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &FileExtractEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // FileExtractPaginateOption enables pagination customization. -type FileExtractPaginateOption func(*fileExtractPager) error +type FileExtractPaginateOption func(*fileextractPager) error // WithFileExtractOrder configures pagination ordering. func WithFileExtractOrder(order *FileExtractOrder) FileExtractPaginateOption { @@ -3696,7 +3882,7 @@ func WithFileExtractOrder(order *FileExtractOrder) FileExtractPaginateOption { order = DefaultFileExtractOrder } o := *order - return func(pager *fileExtractPager) error { + return func(pager *fileextractPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -3710,7 +3896,7 @@ func WithFileExtractOrder(order *FileExtractOrder) FileExtractPaginateOption { // WithFileExtractFilter configures pagination filter. 
func WithFileExtractFilter(filter func(*FileExtractQuery) (*FileExtractQuery, error)) FileExtractPaginateOption { - return func(pager *fileExtractPager) error { + return func(pager *fileextractPager) error { if filter == nil { return errors.New("FileExtractQuery filter cannot be nil") } @@ -3719,13 +3905,14 @@ func WithFileExtractFilter(filter func(*FileExtractQuery) (*FileExtractQuery, er } } -type fileExtractPager struct { - order *FileExtractOrder - filter func(*FileExtractQuery) (*FileExtractQuery, error) +type fileextractPager struct { + reverse bool + order *FileExtractOrder + filter func(*FileExtractQuery) (*FileExtractQuery, error) } -func newFileExtractPager(opts []FileExtractPaginateOption) (*fileExtractPager, error) { - pager := &fileExtractPager{} +func newFileExtractPager(opts []FileExtractPaginateOption, reverse bool) (*fileextractPager, error) { + pager := &fileextractPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -3737,39 +3924,59 @@ func newFileExtractPager(opts []FileExtractPaginateOption) (*fileExtractPager, e return pager, nil } -func (p *fileExtractPager) applyFilter(query *FileExtractQuery) (*FileExtractQuery, error) { +func (p *fileextractPager) applyFilter(query *FileExtractQuery) (*FileExtractQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *fileExtractPager) toCursor(fe *FileExtract) Cursor { +func (p *fileextractPager) toCursor(fe *FileExtract) Cursor { return p.order.Field.toCursor(fe) } -func (p *fileExtractPager) applyCursors(query *FileExtractQuery, after, before *Cursor) *FileExtractQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultFileExtractOrder.Field.field, - ) { +func (p *fileextractPager) applyCursors(query *FileExtractQuery, after, before *Cursor) (*FileExtractQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultFileExtractOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *fileExtractPager) applyOrder(query *FileExtractQuery, reverse bool) *FileExtractQuery { +func (p *fileextractPager) applyOrder(query *FileExtractQuery) *FileExtractQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultFileExtractOrder.Field { - query = query.Order(direction.orderFunc(DefaultFileExtractOrder.Field.field)) + query = query.Order(DefaultFileExtractOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *fileextractPager) orderExpr(query *FileExtractQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultFileExtractOrder.Field { + b.Comma().Ident(DefaultFileExtractOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and 
returns a relay based cursor connection to FileExtract. func (fe *FileExtractQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -3778,98 +3985,54 @@ func (fe *FileExtractQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newFileExtractPager(opts) + pager, err := newFileExtractPager(opts, last != nil) if err != nil { return nil, err } - if fe, err = pager.applyFilter(fe); err != nil { return nil, err } - conn := &FileExtractConnection{Edges: []*FileExtractEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := fe.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = fe.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := fe.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - fe = pager.applyCursors(fe, after, before) - fe = pager.applyOrder(fe, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 } - if limit > 0 { - fe = fe.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - fe = fe.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := fe.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if fe, err = pager.applyCursors(fe, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *FileExtract - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *FileExtract { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *FileExtract { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + fe.Limit(limit) } - - conn.Edges = make([]*FileExtractEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &FileExtractEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := fe.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + fe = pager.applyOrder(fe) + nodes, err := fe.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // FileExtractOrderField defines the ordering field of 
FileExtract. type FileExtractOrderField struct { - field string + // Value extracts the ordering value from the given FileExtract. + Value func(*FileExtract) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) fileextract.OrderOption toCursor func(*FileExtract) Cursor } @@ -3881,9 +4044,13 @@ type FileExtractOrder struct { // DefaultFileExtractOrder is the default ordering of FileExtract. var DefaultFileExtractOrder = &FileExtractOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &FileExtractOrderField{ - field: fileextract.FieldID, + Value: func(fe *FileExtract) (ent.Value, error) { + return fe.ID, nil + }, + column: fileextract.FieldID, + toTerm: fileextract.ByID, toCursor: func(fe *FileExtract) Cursor { return Cursor{ID: fe.ID} }, @@ -3914,6 +4081,44 @@ type FindingConnection struct { TotalCount int `json:"totalCount"` } +func (c *FindingConnection) build(nodes []*Finding, pager *findingPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Finding + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Finding { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Finding { + return nodes[i] + } + } + c.Edges = make([]*FindingEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &FindingEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // FindingPaginateOption enables pagination customization. 
type FindingPaginateOption func(*findingPager) error @@ -3947,12 +4152,13 @@ func WithFindingFilter(filter func(*FindingQuery) (*FindingQuery, error)) Findin } type findingPager struct { - order *FindingOrder - filter func(*FindingQuery) (*FindingQuery, error) + reverse bool + order *FindingOrder + filter func(*FindingQuery) (*FindingQuery, error) } -func newFindingPager(opts []FindingPaginateOption) (*findingPager, error) { - pager := &findingPager{} +func newFindingPager(opts []FindingPaginateOption, reverse bool) (*findingPager, error) { + pager := &findingPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -3975,28 +4181,48 @@ func (p *findingPager) toCursor(f *Finding) Cursor { return p.order.Field.toCursor(f) } -func (p *findingPager) applyCursors(query *FindingQuery, after, before *Cursor) *FindingQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultFindingOrder.Field.field, - ) { +func (p *findingPager) applyCursors(query *FindingQuery, after, before *Cursor) (*FindingQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultFindingOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *findingPager) applyOrder(query *FindingQuery, reverse bool) *FindingQuery { +func (p *findingPager) applyOrder(query *FindingQuery) *FindingQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultFindingOrder.Field { - query = query.Order(direction.orderFunc(DefaultFindingOrder.Field.field)) + query = query.Order(DefaultFindingOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *findingPager) orderExpr(query *FindingQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultFindingOrder.Field { + b.Comma().Ident(DefaultFindingOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Finding. 
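WithFindingFilter above only rejects a nil filter; anything else runs against the query before cursors, ordering, and limits are applied. A small sketch, assuming the same imports as the earlier examples plus github.com/gen0cide/laforge/ent/finding, and assuming Finding has a string name field so that finding.NameContains exists (the search term is purely illustrative):

// searchFindings pages forward over findings whose name matches a substring.
func searchFindings(ctx context.Context, client *ent.Client, first int) (*ent.FindingConnection, error) {
	return client.Finding.Query().Paginate(ctx, nil, &first, nil, nil,
		ent.WithFindingOrder(nil), // nil falls back to DefaultFindingOrder
		ent.WithFindingFilter(func(q *ent.FindingQuery) (*ent.FindingQuery, error) {
			return q.Where(finding.NameContains("ssh")), nil
		}),
	)
}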
func (f *FindingQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -4005,98 +4231,54 @@ func (f *FindingQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newFindingPager(opts) + pager, err := newFindingPager(opts, last != nil) if err != nil { return nil, err } - if f, err = pager.applyFilter(f); err != nil { return nil, err } - conn := &FindingConnection{Edges: []*FindingEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := f.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = f.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := f.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - f = pager.applyCursors(f, after, before) - f = pager.applyOrder(f, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - f = f.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - f = f.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := f.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if f, err = pager.applyCursors(f, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Finding - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Finding { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Finding { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + f.Limit(limit) } - - conn.Edges = make([]*FindingEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &FindingEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := f.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + f = pager.applyOrder(f) + nodes, err := f.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // FindingOrderField defines the ordering field of Finding. type FindingOrderField struct { - field string + // Value extracts the ordering value from the given Finding. 
+ Value func(*Finding) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) finding.OrderOption toCursor func(*Finding) Cursor } @@ -4108,9 +4290,13 @@ type FindingOrder struct { // DefaultFindingOrder is the default ordering of Finding. var DefaultFindingOrder = &FindingOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &FindingOrderField{ - field: finding.FieldID, + Value: func(f *Finding) (ent.Value, error) { + return f.ID, nil + }, + column: finding.FieldID, + toTerm: finding.ByID, toCursor: func(f *Finding) Cursor { return Cursor{ID: f.ID} }, @@ -4141,8 +4327,46 @@ type GinFileMiddlewareConnection struct { TotalCount int `json:"totalCount"` } +func (c *GinFileMiddlewareConnection) build(nodes []*GinFileMiddleware, pager *ginfilemiddlewarePager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *GinFileMiddleware + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *GinFileMiddleware { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *GinFileMiddleware { + return nodes[i] + } + } + c.Edges = make([]*GinFileMiddlewareEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &GinFileMiddlewareEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // GinFileMiddlewarePaginateOption enables pagination customization. -type GinFileMiddlewarePaginateOption func(*ginFileMiddlewarePager) error +type GinFileMiddlewarePaginateOption func(*ginfilemiddlewarePager) error // WithGinFileMiddlewareOrder configures pagination ordering. func WithGinFileMiddlewareOrder(order *GinFileMiddlewareOrder) GinFileMiddlewarePaginateOption { @@ -4150,7 +4374,7 @@ func WithGinFileMiddlewareOrder(order *GinFileMiddlewareOrder) GinFileMiddleware order = DefaultGinFileMiddlewareOrder } o := *order - return func(pager *ginFileMiddlewarePager) error { + return func(pager *ginfilemiddlewarePager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -4164,7 +4388,7 @@ func WithGinFileMiddlewareOrder(order *GinFileMiddlewareOrder) GinFileMiddleware // WithGinFileMiddlewareFilter configures pagination filter. 
func WithGinFileMiddlewareFilter(filter func(*GinFileMiddlewareQuery) (*GinFileMiddlewareQuery, error)) GinFileMiddlewarePaginateOption { - return func(pager *ginFileMiddlewarePager) error { + return func(pager *ginfilemiddlewarePager) error { if filter == nil { return errors.New("GinFileMiddlewareQuery filter cannot be nil") } @@ -4173,13 +4397,14 @@ func WithGinFileMiddlewareFilter(filter func(*GinFileMiddlewareQuery) (*GinFileM } } -type ginFileMiddlewarePager struct { - order *GinFileMiddlewareOrder - filter func(*GinFileMiddlewareQuery) (*GinFileMiddlewareQuery, error) +type ginfilemiddlewarePager struct { + reverse bool + order *GinFileMiddlewareOrder + filter func(*GinFileMiddlewareQuery) (*GinFileMiddlewareQuery, error) } -func newGinFileMiddlewarePager(opts []GinFileMiddlewarePaginateOption) (*ginFileMiddlewarePager, error) { - pager := &ginFileMiddlewarePager{} +func newGinFileMiddlewarePager(opts []GinFileMiddlewarePaginateOption, reverse bool) (*ginfilemiddlewarePager, error) { + pager := &ginfilemiddlewarePager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -4191,39 +4416,59 @@ func newGinFileMiddlewarePager(opts []GinFileMiddlewarePaginateOption) (*ginFile return pager, nil } -func (p *ginFileMiddlewarePager) applyFilter(query *GinFileMiddlewareQuery) (*GinFileMiddlewareQuery, error) { +func (p *ginfilemiddlewarePager) applyFilter(query *GinFileMiddlewareQuery) (*GinFileMiddlewareQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *ginFileMiddlewarePager) toCursor(gfm *GinFileMiddleware) Cursor { +func (p *ginfilemiddlewarePager) toCursor(gfm *GinFileMiddleware) Cursor { return p.order.Field.toCursor(gfm) } -func (p *ginFileMiddlewarePager) applyCursors(query *GinFileMiddlewareQuery, after, before *Cursor) *GinFileMiddlewareQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultGinFileMiddlewareOrder.Field.field, - ) { +func (p *ginfilemiddlewarePager) applyCursors(query *GinFileMiddlewareQuery, after, before *Cursor) (*GinFileMiddlewareQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultGinFileMiddlewareOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *ginFileMiddlewarePager) applyOrder(query *GinFileMiddlewareQuery, reverse bool) *GinFileMiddlewareQuery { +func (p *ginfilemiddlewarePager) applyOrder(query *GinFileMiddlewareQuery) *GinFileMiddlewareQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultGinFileMiddlewareOrder.Field { - query = query.Order(direction.orderFunc(DefaultGinFileMiddlewareOrder.Field.field)) + query = query.Order(DefaultGinFileMiddlewareOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *ginfilemiddlewarePager) orderExpr(query *GinFileMiddlewareQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) 
+ } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultGinFileMiddlewareOrder.Field { + b.Comma().Ident(DefaultGinFileMiddlewareOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to GinFileMiddleware. func (gfm *GinFileMiddlewareQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -4232,100 +4477,56 @@ func (gfm *GinFileMiddlewareQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newGinFileMiddlewarePager(opts) + pager, err := newGinFileMiddlewarePager(opts, last != nil) if err != nil { return nil, err } - if gfm, err = pager.applyFilter(gfm); err != nil { return nil, err } - conn := &GinFileMiddlewareConnection{Edges: []*GinFileMiddlewareEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := gfm.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = gfm.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := gfm.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - gfm = pager.applyCursors(gfm, after, before) - gfm = pager.applyOrder(gfm, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if gfm, err = pager.applyCursors(gfm, after, before); err != nil { + return nil, err } - if limit > 0 { - gfm = gfm.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + gfm.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - gfm = gfm.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := gfm.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + gfm = pager.applyOrder(gfm) nodes, err := gfm.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *GinFileMiddleware - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *GinFileMiddleware { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *GinFileMiddleware { - return nodes[i] - } - } - - conn.Edges = make([]*GinFileMiddlewareEdge, 
len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &GinFileMiddlewareEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} - -// GinFileMiddlewareOrderField defines the ordering field of GinFileMiddleware. -type GinFileMiddlewareOrderField struct { - field string - toCursor func(*GinFileMiddleware) Cursor -} +// GinFileMiddlewareOrderField defines the ordering field of GinFileMiddleware. +type GinFileMiddlewareOrderField struct { + // Value extracts the ordering value from the given GinFileMiddleware. + Value func(*GinFileMiddleware) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) ginfilemiddleware.OrderOption + toCursor func(*GinFileMiddleware) Cursor +} // GinFileMiddlewareOrder defines the ordering of GinFileMiddleware. type GinFileMiddlewareOrder struct { @@ -4335,9 +4536,13 @@ type GinFileMiddlewareOrder struct { // DefaultGinFileMiddlewareOrder is the default ordering of GinFileMiddleware. var DefaultGinFileMiddlewareOrder = &GinFileMiddlewareOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &GinFileMiddlewareOrderField{ - field: ginfilemiddleware.FieldID, + Value: func(gfm *GinFileMiddleware) (ent.Value, error) { + return gfm.ID, nil + }, + column: ginfilemiddleware.FieldID, + toTerm: ginfilemiddleware.ByID, toCursor: func(gfm *GinFileMiddleware) Cursor { return Cursor{ID: gfm.ID} }, @@ -4368,6 +4573,44 @@ type HostConnection struct { TotalCount int `json:"totalCount"` } +func (c *HostConnection) build(nodes []*Host, pager *hostPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Host + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Host { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Host { + return nodes[i] + } + } + c.Edges = make([]*HostEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &HostEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // HostPaginateOption enables pagination customization. 
type HostPaginateOption func(*hostPager) error @@ -4401,12 +4644,13 @@ func WithHostFilter(filter func(*HostQuery) (*HostQuery, error)) HostPaginateOpt } type hostPager struct { - order *HostOrder - filter func(*HostQuery) (*HostQuery, error) + reverse bool + order *HostOrder + filter func(*HostQuery) (*HostQuery, error) } -func newHostPager(opts []HostPaginateOption) (*hostPager, error) { - pager := &hostPager{} +func newHostPager(opts []HostPaginateOption, reverse bool) (*hostPager, error) { + pager := &hostPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -4429,28 +4673,48 @@ func (p *hostPager) toCursor(h *Host) Cursor { return p.order.Field.toCursor(h) } -func (p *hostPager) applyCursors(query *HostQuery, after, before *Cursor) *HostQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultHostOrder.Field.field, - ) { +func (p *hostPager) applyCursors(query *HostQuery, after, before *Cursor) (*HostQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultHostOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *hostPager) applyOrder(query *HostQuery, reverse bool) *HostQuery { +func (p *hostPager) applyOrder(query *HostQuery) *HostQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultHostOrder.Field { - query = query.Order(direction.orderFunc(DefaultHostOrder.Field.field)) + query = query.Order(DefaultHostOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *hostPager) orderExpr(query *HostQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultHostOrder.Field { + b.Comma().Ident(DefaultHostOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Host. 
func (h *HostQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -4459,98 +4723,54 @@ func (h *HostQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newHostPager(opts) + pager, err := newHostPager(opts, last != nil) if err != nil { return nil, err } - if h, err = pager.applyFilter(h); err != nil { return nil, err } - conn := &HostConnection{Edges: []*HostEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := h.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = h.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := h.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count } - - h = pager.applyCursors(h, after, before) - h = pager.applyOrder(h, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - h = h.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - h = h.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := h.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if h, err = pager.applyCursors(h, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Host - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Host { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Host { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + h.Limit(limit) } - - conn.Edges = make([]*HostEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &HostEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := h.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + h = pager.applyOrder(h) + nodes, err := h.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // HostOrderField defines the ordering field of Host. type HostOrderField struct { - field string + // Value extracts the ordering value from the given Host. 
+ Value func(*Host) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) host.OrderOption toCursor func(*Host) Cursor } @@ -4562,9 +4782,13 @@ type HostOrder struct { // DefaultHostOrder is the default ordering of Host. var DefaultHostOrder = &HostOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &HostOrderField{ - field: host.FieldID, + Value: func(h *Host) (ent.Value, error) { + return h.ID, nil + }, + column: host.FieldID, + toTerm: host.ByID, toCursor: func(h *Host) Cursor { return Cursor{ID: h.ID} }, @@ -4595,8 +4819,46 @@ type HostDependencyConnection struct { TotalCount int `json:"totalCount"` } +func (c *HostDependencyConnection) build(nodes []*HostDependency, pager *hostdependencyPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *HostDependency + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *HostDependency { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *HostDependency { + return nodes[i] + } + } + c.Edges = make([]*HostDependencyEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &HostDependencyEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // HostDependencyPaginateOption enables pagination customization. -type HostDependencyPaginateOption func(*hostDependencyPager) error +type HostDependencyPaginateOption func(*hostdependencyPager) error // WithHostDependencyOrder configures pagination ordering. func WithHostDependencyOrder(order *HostDependencyOrder) HostDependencyPaginateOption { @@ -4604,7 +4866,7 @@ func WithHostDependencyOrder(order *HostDependencyOrder) HostDependencyPaginateO order = DefaultHostDependencyOrder } o := *order - return func(pager *hostDependencyPager) error { + return func(pager *hostdependencyPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -4618,7 +4880,7 @@ func WithHostDependencyOrder(order *HostDependencyOrder) HostDependencyPaginateO // WithHostDependencyFilter configures pagination filter. 
func WithHostDependencyFilter(filter func(*HostDependencyQuery) (*HostDependencyQuery, error)) HostDependencyPaginateOption { - return func(pager *hostDependencyPager) error { + return func(pager *hostdependencyPager) error { if filter == nil { return errors.New("HostDependencyQuery filter cannot be nil") } @@ -4627,13 +4889,14 @@ func WithHostDependencyFilter(filter func(*HostDependencyQuery) (*HostDependency } } -type hostDependencyPager struct { - order *HostDependencyOrder - filter func(*HostDependencyQuery) (*HostDependencyQuery, error) +type hostdependencyPager struct { + reverse bool + order *HostDependencyOrder + filter func(*HostDependencyQuery) (*HostDependencyQuery, error) } -func newHostDependencyPager(opts []HostDependencyPaginateOption) (*hostDependencyPager, error) { - pager := &hostDependencyPager{} +func newHostDependencyPager(opts []HostDependencyPaginateOption, reverse bool) (*hostdependencyPager, error) { + pager := &hostdependencyPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -4645,39 +4908,59 @@ func newHostDependencyPager(opts []HostDependencyPaginateOption) (*hostDependenc return pager, nil } -func (p *hostDependencyPager) applyFilter(query *HostDependencyQuery) (*HostDependencyQuery, error) { +func (p *hostdependencyPager) applyFilter(query *HostDependencyQuery) (*HostDependencyQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *hostDependencyPager) toCursor(hd *HostDependency) Cursor { +func (p *hostdependencyPager) toCursor(hd *HostDependency) Cursor { return p.order.Field.toCursor(hd) } -func (p *hostDependencyPager) applyCursors(query *HostDependencyQuery, after, before *Cursor) *HostDependencyQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultHostDependencyOrder.Field.field, - ) { +func (p *hostdependencyPager) applyCursors(query *HostDependencyQuery, after, before *Cursor) (*HostDependencyQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultHostDependencyOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *hostDependencyPager) applyOrder(query *HostDependencyQuery, reverse bool) *HostDependencyQuery { +func (p *hostdependencyPager) applyOrder(query *HostDependencyQuery) *HostDependencyQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultHostDependencyOrder.Field { - query = query.Order(direction.orderFunc(DefaultHostDependencyOrder.Field.field)) + query = query.Order(DefaultHostDependencyOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *hostdependencyPager) orderExpr(query *HostDependencyQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != 
DefaultHostDependencyOrder.Field { + b.Comma().Ident(DefaultHostDependencyOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to HostDependency. func (hd *HostDependencyQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -4686,98 +4969,54 @@ func (hd *HostDependencyQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newHostDependencyPager(opts) + pager, err := newHostDependencyPager(opts, last != nil) if err != nil { return nil, err } - if hd, err = pager.applyFilter(hd); err != nil { return nil, err } - conn := &HostDependencyConnection{Edges: []*HostDependencyEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := hd.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = hd.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := hd.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - hd = pager.applyCursors(hd, after, before) - hd = pager.applyOrder(hd, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 } - if limit > 0 { - hd = hd.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - hd = hd.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := hd.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if hd, err = pager.applyCursors(hd, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *HostDependency - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *HostDependency { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *HostDependency { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + hd.Limit(limit) } - - conn.Edges = make([]*HostDependencyEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &HostDependencyEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := hd.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + hd = 
pager.applyOrder(hd) + nodes, err := hd.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // HostDependencyOrderField defines the ordering field of HostDependency. type HostDependencyOrderField struct { - field string + // Value extracts the ordering value from the given HostDependency. + Value func(*HostDependency) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) hostdependency.OrderOption toCursor func(*HostDependency) Cursor } @@ -4789,9 +5028,13 @@ type HostDependencyOrder struct { // DefaultHostDependencyOrder is the default ordering of HostDependency. var DefaultHostDependencyOrder = &HostDependencyOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &HostDependencyOrderField{ - field: hostdependency.FieldID, + Value: func(hd *HostDependency) (ent.Value, error) { + return hd.ID, nil + }, + column: hostdependency.FieldID, + toTerm: hostdependency.ByID, toCursor: func(hd *HostDependency) Cursor { return Cursor{ID: hd.ID} }, @@ -4822,6 +5065,44 @@ type IdentityConnection struct { TotalCount int `json:"totalCount"` } +func (c *IdentityConnection) build(nodes []*Identity, pager *identityPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Identity + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Identity { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Identity { + return nodes[i] + } + } + c.Edges = make([]*IdentityEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &IdentityEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // IdentityPaginateOption enables pagination customization. 
type IdentityPaginateOption func(*identityPager) error @@ -4855,12 +5136,13 @@ func WithIdentityFilter(filter func(*IdentityQuery) (*IdentityQuery, error)) Ide } type identityPager struct { - order *IdentityOrder - filter func(*IdentityQuery) (*IdentityQuery, error) + reverse bool + order *IdentityOrder + filter func(*IdentityQuery) (*IdentityQuery, error) } -func newIdentityPager(opts []IdentityPaginateOption) (*identityPager, error) { - pager := &identityPager{} +func newIdentityPager(opts []IdentityPaginateOption, reverse bool) (*identityPager, error) { + pager := &identityPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -4883,28 +5165,48 @@ func (p *identityPager) toCursor(i *Identity) Cursor { return p.order.Field.toCursor(i) } -func (p *identityPager) applyCursors(query *IdentityQuery, after, before *Cursor) *IdentityQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultIdentityOrder.Field.field, - ) { +func (p *identityPager) applyCursors(query *IdentityQuery, after, before *Cursor) (*IdentityQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultIdentityOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *identityPager) applyOrder(query *IdentityQuery, reverse bool) *IdentityQuery { +func (p *identityPager) applyOrder(query *IdentityQuery) *IdentityQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultIdentityOrder.Field { - query = query.Order(direction.orderFunc(DefaultIdentityOrder.Field.field)) + query = query.Order(DefaultIdentityOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *identityPager) orderExpr(query *IdentityQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultIdentityOrder.Field { + b.Comma().Ident(DefaultIdentityOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Identity. 
func (i *IdentityQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -4913,100 +5215,56 @@ func (i *IdentityQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newIdentityPager(opts) + pager, err := newIdentityPager(opts, last != nil) if err != nil { return nil, err } - if i, err = pager.applyFilter(i); err != nil { return nil, err } - conn := &IdentityConnection{Edges: []*IdentityEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := i.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = i.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := i.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - i = pager.applyCursors(i, after, before) - i = pager.applyOrder(i, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if i, err = pager.applyCursors(i, after, before); err != nil { + return nil, err } - if limit > 0 { - i = i.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + i.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - i = i.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := i.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + i = pager.applyOrder(i) nodes, err := i.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *Identity - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Identity { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Identity { - return nodes[i] - } - } - - conn.Edges = make([]*IdentityEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &IdentityEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} - -// IdentityOrderField defines the ordering field of Identity. 
-type IdentityOrderField struct { - field string - toCursor func(*Identity) Cursor -} +// IdentityOrderField defines the ordering field of Identity. +type IdentityOrderField struct { + // Value extracts the ordering value from the given Identity. + Value func(*Identity) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) identity.OrderOption + toCursor func(*Identity) Cursor +} // IdentityOrder defines the ordering of Identity. type IdentityOrder struct { @@ -5016,9 +5274,13 @@ type IdentityOrder struct { // DefaultIdentityOrder is the default ordering of Identity. var DefaultIdentityOrder = &IdentityOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &IdentityOrderField{ - field: identity.FieldID, + Value: func(i *Identity) (ent.Value, error) { + return i.ID, nil + }, + column: identity.FieldID, + toTerm: identity.ByID, toCursor: func(i *Identity) Cursor { return Cursor{ID: i.ID} }, @@ -5049,8 +5311,46 @@ type IncludedNetworkConnection struct { TotalCount int `json:"totalCount"` } +func (c *IncludedNetworkConnection) build(nodes []*IncludedNetwork, pager *includednetworkPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *IncludedNetwork + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *IncludedNetwork { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *IncludedNetwork { + return nodes[i] + } + } + c.Edges = make([]*IncludedNetworkEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &IncludedNetworkEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // IncludedNetworkPaginateOption enables pagination customization. -type IncludedNetworkPaginateOption func(*includedNetworkPager) error +type IncludedNetworkPaginateOption func(*includednetworkPager) error // WithIncludedNetworkOrder configures pagination ordering. func WithIncludedNetworkOrder(order *IncludedNetworkOrder) IncludedNetworkPaginateOption { @@ -5058,7 +5358,7 @@ func WithIncludedNetworkOrder(order *IncludedNetworkOrder) IncludedNetworkPagina order = DefaultIncludedNetworkOrder } o := *order - return func(pager *includedNetworkPager) error { + return func(pager *includednetworkPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -5072,7 +5372,7 @@ func WithIncludedNetworkOrder(order *IncludedNetworkOrder) IncludedNetworkPagina // WithIncludedNetworkFilter configures pagination filter. 
func WithIncludedNetworkFilter(filter func(*IncludedNetworkQuery) (*IncludedNetworkQuery, error)) IncludedNetworkPaginateOption { - return func(pager *includedNetworkPager) error { + return func(pager *includednetworkPager) error { if filter == nil { return errors.New("IncludedNetworkQuery filter cannot be nil") } @@ -5081,13 +5381,14 @@ func WithIncludedNetworkFilter(filter func(*IncludedNetworkQuery) (*IncludedNetw } } -type includedNetworkPager struct { - order *IncludedNetworkOrder - filter func(*IncludedNetworkQuery) (*IncludedNetworkQuery, error) +type includednetworkPager struct { + reverse bool + order *IncludedNetworkOrder + filter func(*IncludedNetworkQuery) (*IncludedNetworkQuery, error) } -func newIncludedNetworkPager(opts []IncludedNetworkPaginateOption) (*includedNetworkPager, error) { - pager := &includedNetworkPager{} +func newIncludedNetworkPager(opts []IncludedNetworkPaginateOption, reverse bool) (*includednetworkPager, error) { + pager := &includednetworkPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -5099,39 +5400,59 @@ func newIncludedNetworkPager(opts []IncludedNetworkPaginateOption) (*includedNet return pager, nil } -func (p *includedNetworkPager) applyFilter(query *IncludedNetworkQuery) (*IncludedNetworkQuery, error) { +func (p *includednetworkPager) applyFilter(query *IncludedNetworkQuery) (*IncludedNetworkQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *includedNetworkPager) toCursor(in *IncludedNetwork) Cursor { +func (p *includednetworkPager) toCursor(in *IncludedNetwork) Cursor { return p.order.Field.toCursor(in) } -func (p *includedNetworkPager) applyCursors(query *IncludedNetworkQuery, after, before *Cursor) *IncludedNetworkQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultIncludedNetworkOrder.Field.field, - ) { +func (p *includednetworkPager) applyCursors(query *IncludedNetworkQuery, after, before *Cursor) (*IncludedNetworkQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultIncludedNetworkOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *includedNetworkPager) applyOrder(query *IncludedNetworkQuery, reverse bool) *IncludedNetworkQuery { +func (p *includednetworkPager) applyOrder(query *IncludedNetworkQuery) *IncludedNetworkQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultIncludedNetworkOrder.Field { - query = query.Order(direction.orderFunc(DefaultIncludedNetworkOrder.Field.field)) + query = query.Order(DefaultIncludedNetworkOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *includednetworkPager) orderExpr(query *IncludedNetworkQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + 
b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultIncludedNetworkOrder.Field { + b.Comma().Ident(DefaultIncludedNetworkOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to IncludedNetwork. func (in *IncludedNetworkQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -5140,98 +5461,54 @@ func (in *IncludedNetworkQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newIncludedNetworkPager(opts) + pager, err := newIncludedNetworkPager(opts, last != nil) if err != nil { return nil, err } - if in, err = pager.applyFilter(in); err != nil { return nil, err } - conn := &IncludedNetworkConnection{Edges: []*IncludedNetworkEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := in.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = in.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := in.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - in = pager.applyCursors(in, after, before) - in = pager.applyOrder(in, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - in = in.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - in = in.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := in.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if in, err = pager.applyCursors(in, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *IncludedNetwork - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *IncludedNetwork { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *IncludedNetwork { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + in.Limit(limit) } - - conn.Edges = make([]*IncludedNetworkEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &IncludedNetworkEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := in.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = 
&conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + in = pager.applyOrder(in) + nodes, err := in.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // IncludedNetworkOrderField defines the ordering field of IncludedNetwork. type IncludedNetworkOrderField struct { - field string + // Value extracts the ordering value from the given IncludedNetwork. + Value func(*IncludedNetwork) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) includednetwork.OrderOption toCursor func(*IncludedNetwork) Cursor } @@ -5243,9 +5520,13 @@ type IncludedNetworkOrder struct { // DefaultIncludedNetworkOrder is the default ordering of IncludedNetwork. var DefaultIncludedNetworkOrder = &IncludedNetworkOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &IncludedNetworkOrderField{ - field: includednetwork.FieldID, + Value: func(in *IncludedNetwork) (ent.Value, error) { + return in.ID, nil + }, + column: includednetwork.FieldID, + toTerm: includednetwork.ByID, toCursor: func(in *IncludedNetwork) Cursor { return Cursor{ID: in.ID} }, @@ -5276,6 +5557,44 @@ type NetworkConnection struct { TotalCount int `json:"totalCount"` } +func (c *NetworkConnection) build(nodes []*Network, pager *networkPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Network + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Network { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Network { + return nodes[i] + } + } + c.Edges = make([]*NetworkEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &NetworkEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // NetworkPaginateOption enables pagination customization. 
type NetworkPaginateOption func(*networkPager) error @@ -5309,12 +5628,13 @@ func WithNetworkFilter(filter func(*NetworkQuery) (*NetworkQuery, error)) Networ } type networkPager struct { - order *NetworkOrder - filter func(*NetworkQuery) (*NetworkQuery, error) + reverse bool + order *NetworkOrder + filter func(*NetworkQuery) (*NetworkQuery, error) } -func newNetworkPager(opts []NetworkPaginateOption) (*networkPager, error) { - pager := &networkPager{} +func newNetworkPager(opts []NetworkPaginateOption, reverse bool) (*networkPager, error) { + pager := &networkPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -5337,28 +5657,48 @@ func (p *networkPager) toCursor(n *Network) Cursor { return p.order.Field.toCursor(n) } -func (p *networkPager) applyCursors(query *NetworkQuery, after, before *Cursor) *NetworkQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultNetworkOrder.Field.field, - ) { +func (p *networkPager) applyCursors(query *NetworkQuery, after, before *Cursor) (*NetworkQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultNetworkOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *networkPager) applyOrder(query *NetworkQuery, reverse bool) *NetworkQuery { +func (p *networkPager) applyOrder(query *NetworkQuery) *NetworkQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultNetworkOrder.Field { - query = query.Order(direction.orderFunc(DefaultNetworkOrder.Field.field)) + query = query.Order(DefaultNetworkOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *networkPager) orderExpr(query *NetworkQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultNetworkOrder.Field { + b.Comma().Ident(DefaultNetworkOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Network. 
func (n *NetworkQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -5367,98 +5707,54 @@ func (n *NetworkQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newNetworkPager(opts) + pager, err := newNetworkPager(opts, last != nil) if err != nil { return nil, err } - if n, err = pager.applyFilter(n); err != nil { return nil, err } - conn := &NetworkConnection{Edges: []*NetworkEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := n.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = n.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := n.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count } - - n = pager.applyCursors(n, after, before) - n = pager.applyOrder(n, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - n = n.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - n = n.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := n.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if n, err = pager.applyCursors(n, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Network - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Network { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Network { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + n.Limit(limit) } - - conn.Edges = make([]*NetworkEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &NetworkEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := n.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + n = pager.applyOrder(n) + nodes, err := n.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // NetworkOrderField defines the ordering field of Network. type NetworkOrderField struct { - field string + // Value extracts the ordering value from the given Network. 
+ Value func(*Network) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) network.OrderOption toCursor func(*Network) Cursor } @@ -5470,9 +5766,13 @@ type NetworkOrder struct { // DefaultNetworkOrder is the default ordering of Network. var DefaultNetworkOrder = &NetworkOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &NetworkOrderField{ - field: network.FieldID, + Value: func(n *Network) (ent.Value, error) { + return n.ID, nil + }, + column: network.FieldID, + toTerm: network.ByID, toCursor: func(n *Network) Cursor { return Cursor{ID: n.ID} }, @@ -5503,6 +5803,44 @@ type PlanConnection struct { TotalCount int `json:"totalCount"` } +func (c *PlanConnection) build(nodes []*Plan, pager *planPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Plan + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Plan { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Plan { + return nodes[i] + } + } + c.Edges = make([]*PlanEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &PlanEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // PlanPaginateOption enables pagination customization. type PlanPaginateOption func(*planPager) error @@ -5536,12 +5874,13 @@ func WithPlanFilter(filter func(*PlanQuery) (*PlanQuery, error)) PlanPaginateOpt } type planPager struct { - order *PlanOrder - filter func(*PlanQuery) (*PlanQuery, error) + reverse bool + order *PlanOrder + filter func(*PlanQuery) (*PlanQuery, error) } -func newPlanPager(opts []PlanPaginateOption) (*planPager, error) { - pager := &planPager{} +func newPlanPager(opts []PlanPaginateOption, reverse bool) (*planPager, error) { + pager := &planPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -5564,28 +5903,48 @@ func (p *planPager) toCursor(pl *Plan) Cursor { return p.order.Field.toCursor(pl) } -func (p *planPager) applyCursors(query *PlanQuery, after, before *Cursor) *PlanQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultPlanOrder.Field.field, - ) { +func (p *planPager) applyCursors(query *PlanQuery, after, before *Cursor) (*PlanQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultPlanOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *planPager) applyOrder(query *PlanQuery, reverse bool) *PlanQuery { +func (p *planPager) applyOrder(query *PlanQuery) *PlanQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != 
DefaultPlanOrder.Field { - query = query.Order(direction.orderFunc(DefaultPlanOrder.Field.field)) + query = query.Order(DefaultPlanOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *planPager) orderExpr(query *PlanQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultPlanOrder.Field { + b.Comma().Ident(DefaultPlanOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Plan. func (pl *PlanQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -5594,100 +5953,56 @@ func (pl *PlanQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newPlanPager(opts) + pager, err := newPlanPager(opts, last != nil) if err != nil { return nil, err } - if pl, err = pager.applyFilter(pl); err != nil { return nil, err } - conn := &PlanConnection{Edges: []*PlanEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := pl.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = pl.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := pl.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - pl = pager.applyCursors(pl, after, before) - pl = pager.applyOrder(pl, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 + if pl, err = pager.applyCursors(pl, after, before); err != nil { + return nil, err } - if limit > 0 { - pl = pl.Limit(limit) + if limit := paginateLimit(first, last); limit != 0 { + pl.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - pl = pl.collectField(graphql.GetOperationContext(ctx), *field) + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := pl.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err + } } - + pl = pager.applyOrder(pl) nodes, err := pl.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if err != nil { + return nil, err } + conn.build(nodes, pager, after, first, before, last) + return conn, nil +} - if len(nodes) == limit { - conn.PageInfo.HasNextPage = 
first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *Plan - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Plan { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Plan { - return nodes[i] - } - } - - conn.Edges = make([]*PlanEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &PlanEdge{ - Node: node, - Cursor: pager.toCursor(node), - } - } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) - } - - return conn, nil -} - -// PlanOrderField defines the ordering field of Plan. -type PlanOrderField struct { - field string - toCursor func(*Plan) Cursor -} +// PlanOrderField defines the ordering field of Plan. +type PlanOrderField struct { + // Value extracts the ordering value from the given Plan. + Value func(*Plan) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) plan.OrderOption + toCursor func(*Plan) Cursor +} // PlanOrder defines the ordering of Plan. type PlanOrder struct { @@ -5697,9 +6012,13 @@ type PlanOrder struct { // DefaultPlanOrder is the default ordering of Plan. var DefaultPlanOrder = &PlanOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &PlanOrderField{ - field: plan.FieldID, + Value: func(pl *Plan) (ent.Value, error) { + return pl.ID, nil + }, + column: plan.FieldID, + toTerm: plan.ByID, toCursor: func(pl *Plan) Cursor { return Cursor{ID: pl.ID} }, @@ -5730,8 +6049,46 @@ type PlanDiffConnection struct { TotalCount int `json:"totalCount"` } +func (c *PlanDiffConnection) build(nodes []*PlanDiff, pager *plandiffPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *PlanDiff + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *PlanDiff { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *PlanDiff { + return nodes[i] + } + } + c.Edges = make([]*PlanDiffEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &PlanDiffEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // PlanDiffPaginateOption enables pagination customization. -type PlanDiffPaginateOption func(*planDiffPager) error +type PlanDiffPaginateOption func(*plandiffPager) error // WithPlanDiffOrder configures pagination ordering. func WithPlanDiffOrder(order *PlanDiffOrder) PlanDiffPaginateOption { @@ -5739,7 +6096,7 @@ func WithPlanDiffOrder(order *PlanDiffOrder) PlanDiffPaginateOption { order = DefaultPlanDiffOrder } o := *order - return func(pager *planDiffPager) error { + return func(pager *plandiffPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -5753,7 +6110,7 @@ func WithPlanDiffOrder(order *PlanDiffOrder) PlanDiffPaginateOption { // WithPlanDiffFilter configures pagination filter. 
func WithPlanDiffFilter(filter func(*PlanDiffQuery) (*PlanDiffQuery, error)) PlanDiffPaginateOption { - return func(pager *planDiffPager) error { + return func(pager *plandiffPager) error { if filter == nil { return errors.New("PlanDiffQuery filter cannot be nil") } @@ -5762,13 +6119,14 @@ func WithPlanDiffFilter(filter func(*PlanDiffQuery) (*PlanDiffQuery, error)) Pla } } -type planDiffPager struct { - order *PlanDiffOrder - filter func(*PlanDiffQuery) (*PlanDiffQuery, error) +type plandiffPager struct { + reverse bool + order *PlanDiffOrder + filter func(*PlanDiffQuery) (*PlanDiffQuery, error) } -func newPlanDiffPager(opts []PlanDiffPaginateOption) (*planDiffPager, error) { - pager := &planDiffPager{} +func newPlanDiffPager(opts []PlanDiffPaginateOption, reverse bool) (*plandiffPager, error) { + pager := &plandiffPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -5780,39 +6138,59 @@ func newPlanDiffPager(opts []PlanDiffPaginateOption) (*planDiffPager, error) { return pager, nil } -func (p *planDiffPager) applyFilter(query *PlanDiffQuery) (*PlanDiffQuery, error) { +func (p *plandiffPager) applyFilter(query *PlanDiffQuery) (*PlanDiffQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *planDiffPager) toCursor(pd *PlanDiff) Cursor { +func (p *plandiffPager) toCursor(pd *PlanDiff) Cursor { return p.order.Field.toCursor(pd) } -func (p *planDiffPager) applyCursors(query *PlanDiffQuery, after, before *Cursor) *PlanDiffQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultPlanDiffOrder.Field.field, - ) { +func (p *plandiffPager) applyCursors(query *PlanDiffQuery, after, before *Cursor) (*PlanDiffQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultPlanDiffOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *planDiffPager) applyOrder(query *PlanDiffQuery, reverse bool) *PlanDiffQuery { +func (p *plandiffPager) applyOrder(query *PlanDiffQuery) *PlanDiffQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultPlanDiffOrder.Field { - query = query.Order(direction.orderFunc(DefaultPlanDiffOrder.Field.field)) + query = query.Order(DefaultPlanDiffOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *plandiffPager) orderExpr(query *PlanDiffQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultPlanDiffOrder.Field { + b.Comma().Ident(DefaultPlanDiffOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to PlanDiff. 
func (pd *PlanDiffQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -5821,98 +6199,54 @@ func (pd *PlanDiffQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newPlanDiffPager(opts) + pager, err := newPlanDiffPager(opts, last != nil) if err != nil { return nil, err } - if pd, err = pager.applyFilter(pd); err != nil { return nil, err } - conn := &PlanDiffConnection{Edges: []*PlanDiffEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := pd.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = pd.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := pd.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - pd = pager.applyCursors(pd, after, before) - pd = pager.applyOrder(pd, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 } - if limit > 0 { - pd = pd.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - pd = pd.collectField(graphql.GetOperationContext(ctx), *field) + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - nodes, err := pd.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if pd, err = pager.applyCursors(pd, after, before); err != nil { + return nil, err } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if limit := paginateLimit(first, last); limit != 0 { + pd.Limit(limit) } - - var nodeAt func(int) *PlanDiff - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *PlanDiff { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *PlanDiff { - return nodes[i] - } - } - - conn.Edges = make([]*PlanDiffEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &PlanDiffEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := pd.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + pd = pager.applyOrder(pd) + nodes, err := pd.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // PlanDiffOrderField defines the ordering field of PlanDiff. 
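// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: a minimal usage sketch of
// the regenerated Paginate signature above for forward (first/after)
// pagination. The import path, package name, and helper name are assumptions
// made for illustration only.
package resolvers // hypothetical caller-side package

import (
	"context"

	"github.com/gen0cide/laforge/ent" // assumed location of the generated package
)

// firstPlanDiffPage fetches the first ten PlanDiff nodes using the default
// ordering (ascending by ID, per DefaultPlanDiffOrder) and returns a
// relay-style connection.
func firstPlanDiffPage(ctx context.Context, client *ent.Client) (*ent.PlanDiffConnection, error) {
	first := 10
	return client.PlanDiff.Query().
		Paginate(ctx, nil /* after */, &first, nil /* before */, nil /* last */)
}

// The next page would pass conn.PageInfo.EndCursor as the after argument,
// provided conn.PageInfo.HasNextPage is true.
// ---------------------------------------------------------------------------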
type PlanDiffOrderField struct { - field string + // Value extracts the ordering value from the given PlanDiff. + Value func(*PlanDiff) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) plandiff.OrderOption toCursor func(*PlanDiff) Cursor } @@ -5924,9 +6258,13 @@ type PlanDiffOrder struct { // DefaultPlanDiffOrder is the default ordering of PlanDiff. var DefaultPlanDiffOrder = &PlanDiffOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &PlanDiffOrderField{ - field: plandiff.FieldID, + Value: func(pd *PlanDiff) (ent.Value, error) { + return pd.ID, nil + }, + column: plandiff.FieldID, + toTerm: plandiff.ByID, toCursor: func(pd *PlanDiff) Cursor { return Cursor{ID: pd.ID} }, @@ -5957,8 +6295,46 @@ type ProvisionedHostConnection struct { TotalCount int `json:"totalCount"` } +func (c *ProvisionedHostConnection) build(nodes []*ProvisionedHost, pager *provisionedhostPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *ProvisionedHost + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *ProvisionedHost { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *ProvisionedHost { + return nodes[i] + } + } + c.Edges = make([]*ProvisionedHostEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &ProvisionedHostEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // ProvisionedHostPaginateOption enables pagination customization. -type ProvisionedHostPaginateOption func(*provisionedHostPager) error +type ProvisionedHostPaginateOption func(*provisionedhostPager) error // WithProvisionedHostOrder configures pagination ordering. func WithProvisionedHostOrder(order *ProvisionedHostOrder) ProvisionedHostPaginateOption { @@ -5966,7 +6342,7 @@ func WithProvisionedHostOrder(order *ProvisionedHostOrder) ProvisionedHostPagina order = DefaultProvisionedHostOrder } o := *order - return func(pager *provisionedHostPager) error { + return func(pager *provisionedhostPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -5980,7 +6356,7 @@ func WithProvisionedHostOrder(order *ProvisionedHostOrder) ProvisionedHostPagina // WithProvisionedHostFilter configures pagination filter. 
func WithProvisionedHostFilter(filter func(*ProvisionedHostQuery) (*ProvisionedHostQuery, error)) ProvisionedHostPaginateOption { - return func(pager *provisionedHostPager) error { + return func(pager *provisionedhostPager) error { if filter == nil { return errors.New("ProvisionedHostQuery filter cannot be nil") } @@ -5989,13 +6365,14 @@ func WithProvisionedHostFilter(filter func(*ProvisionedHostQuery) (*ProvisionedH } } -type provisionedHostPager struct { - order *ProvisionedHostOrder - filter func(*ProvisionedHostQuery) (*ProvisionedHostQuery, error) +type provisionedhostPager struct { + reverse bool + order *ProvisionedHostOrder + filter func(*ProvisionedHostQuery) (*ProvisionedHostQuery, error) } -func newProvisionedHostPager(opts []ProvisionedHostPaginateOption) (*provisionedHostPager, error) { - pager := &provisionedHostPager{} +func newProvisionedHostPager(opts []ProvisionedHostPaginateOption, reverse bool) (*provisionedhostPager, error) { + pager := &provisionedhostPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -6007,39 +6384,59 @@ func newProvisionedHostPager(opts []ProvisionedHostPaginateOption) (*provisioned return pager, nil } -func (p *provisionedHostPager) applyFilter(query *ProvisionedHostQuery) (*ProvisionedHostQuery, error) { +func (p *provisionedhostPager) applyFilter(query *ProvisionedHostQuery) (*ProvisionedHostQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *provisionedHostPager) toCursor(ph *ProvisionedHost) Cursor { +func (p *provisionedhostPager) toCursor(ph *ProvisionedHost) Cursor { return p.order.Field.toCursor(ph) } -func (p *provisionedHostPager) applyCursors(query *ProvisionedHostQuery, after, before *Cursor) *ProvisionedHostQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultProvisionedHostOrder.Field.field, - ) { +func (p *provisionedhostPager) applyCursors(query *ProvisionedHostQuery, after, before *Cursor) (*ProvisionedHostQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultProvisionedHostOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *provisionedHostPager) applyOrder(query *ProvisionedHostQuery, reverse bool) *ProvisionedHostQuery { +func (p *provisionedhostPager) applyOrder(query *ProvisionedHostQuery) *ProvisionedHostQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultProvisionedHostOrder.Field { - query = query.Order(direction.orderFunc(DefaultProvisionedHostOrder.Field.field)) + query = query.Order(DefaultProvisionedHostOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *provisionedhostPager) orderExpr(query *ProvisionedHostQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + 
b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultProvisionedHostOrder.Field { + b.Comma().Ident(DefaultProvisionedHostOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to ProvisionedHost. func (ph *ProvisionedHostQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -6048,98 +6445,54 @@ func (ph *ProvisionedHostQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newProvisionedHostPager(opts) + pager, err := newProvisionedHostPager(opts, last != nil) if err != nil { return nil, err } - if ph, err = pager.applyFilter(ph); err != nil { return nil, err } - conn := &ProvisionedHostConnection{Edges: []*ProvisionedHostEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := ph.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = ph.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := ph.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count } - - ph = pager.applyCursors(ph, after, before) - ph = pager.applyOrder(ph, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - ph = ph.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - ph = ph.collectField(graphql.GetOperationContext(ctx), *field) + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - nodes, err := ph.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ph, err = pager.applyCursors(ph, after, before); err != nil { + return nil, err } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if limit := paginateLimit(first, last); limit != 0 { + ph.Limit(limit) } - - var nodeAt func(int) *ProvisionedHost - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *ProvisionedHost { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *ProvisionedHost { - return nodes[i] - } - } - - conn.Edges = make([]*ProvisionedHostEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &ProvisionedHostEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := ph.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = 
&conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + ph = pager.applyOrder(ph) + nodes, err := ph.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // ProvisionedHostOrderField defines the ordering field of ProvisionedHost. type ProvisionedHostOrderField struct { - field string + // Value extracts the ordering value from the given ProvisionedHost. + Value func(*ProvisionedHost) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) provisionedhost.OrderOption toCursor func(*ProvisionedHost) Cursor } @@ -6151,9 +6504,13 @@ type ProvisionedHostOrder struct { // DefaultProvisionedHostOrder is the default ordering of ProvisionedHost. var DefaultProvisionedHostOrder = &ProvisionedHostOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &ProvisionedHostOrderField{ - field: provisionedhost.FieldID, + Value: func(ph *ProvisionedHost) (ent.Value, error) { + return ph.ID, nil + }, + column: provisionedhost.FieldID, + toTerm: provisionedhost.ByID, toCursor: func(ph *ProvisionedHost) Cursor { return Cursor{ID: ph.ID} }, @@ -6184,8 +6541,46 @@ type ProvisionedNetworkConnection struct { TotalCount int `json:"totalCount"` } +func (c *ProvisionedNetworkConnection) build(nodes []*ProvisionedNetwork, pager *provisionednetworkPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *ProvisionedNetwork + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *ProvisionedNetwork { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *ProvisionedNetwork { + return nodes[i] + } + } + c.Edges = make([]*ProvisionedNetworkEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &ProvisionedNetworkEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // ProvisionedNetworkPaginateOption enables pagination customization. -type ProvisionedNetworkPaginateOption func(*provisionedNetworkPager) error +type ProvisionedNetworkPaginateOption func(*provisionednetworkPager) error // WithProvisionedNetworkOrder configures pagination ordering. func WithProvisionedNetworkOrder(order *ProvisionedNetworkOrder) ProvisionedNetworkPaginateOption { @@ -6193,7 +6588,7 @@ func WithProvisionedNetworkOrder(order *ProvisionedNetworkOrder) ProvisionedNetw order = DefaultProvisionedNetworkOrder } o := *order - return func(pager *provisionedNetworkPager) error { + return func(pager *provisionednetworkPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -6207,7 +6602,7 @@ func WithProvisionedNetworkOrder(order *ProvisionedNetworkOrder) ProvisionedNetw // WithProvisionedNetworkFilter configures pagination filter. 
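// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: backward (last/before)
// pagination against the rewritten ProvisionedHost Paginate above. Because the
// pager is now built with reverse = (last != nil), the ORDER BY direction is
// flipped for the SQL query and Connection.build re-reverses the slice via
// nodeAt, so edges still come back in forward order. Import path, package
// name, and helper name are assumptions for illustration only.
package resolvers // hypothetical caller-side package

import (
	"context"

	"github.com/gen0cide/laforge/ent" // assumed location of the generated package
)

// lastProvisionedHostPage returns the five ProvisionedHost nodes immediately
// before the given cursor, or the last five overall when before is nil.
func lastProvisionedHostPage(ctx context.Context, client *ent.Client, before *ent.Cursor) (*ent.ProvisionedHostConnection, error) {
	last := 5
	return client.ProvisionedHost.Query().
		Paginate(ctx, nil, nil, before, &last)
}
// ---------------------------------------------------------------------------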
func WithProvisionedNetworkFilter(filter func(*ProvisionedNetworkQuery) (*ProvisionedNetworkQuery, error)) ProvisionedNetworkPaginateOption { - return func(pager *provisionedNetworkPager) error { + return func(pager *provisionednetworkPager) error { if filter == nil { return errors.New("ProvisionedNetworkQuery filter cannot be nil") } @@ -6216,13 +6611,14 @@ func WithProvisionedNetworkFilter(filter func(*ProvisionedNetworkQuery) (*Provis } } -type provisionedNetworkPager struct { - order *ProvisionedNetworkOrder - filter func(*ProvisionedNetworkQuery) (*ProvisionedNetworkQuery, error) +type provisionednetworkPager struct { + reverse bool + order *ProvisionedNetworkOrder + filter func(*ProvisionedNetworkQuery) (*ProvisionedNetworkQuery, error) } -func newProvisionedNetworkPager(opts []ProvisionedNetworkPaginateOption) (*provisionedNetworkPager, error) { - pager := &provisionedNetworkPager{} +func newProvisionedNetworkPager(opts []ProvisionedNetworkPaginateOption, reverse bool) (*provisionednetworkPager, error) { + pager := &provisionednetworkPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -6234,39 +6630,59 @@ func newProvisionedNetworkPager(opts []ProvisionedNetworkPaginateOption) (*provi return pager, nil } -func (p *provisionedNetworkPager) applyFilter(query *ProvisionedNetworkQuery) (*ProvisionedNetworkQuery, error) { +func (p *provisionednetworkPager) applyFilter(query *ProvisionedNetworkQuery) (*ProvisionedNetworkQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *provisionedNetworkPager) toCursor(pn *ProvisionedNetwork) Cursor { +func (p *provisionednetworkPager) toCursor(pn *ProvisionedNetwork) Cursor { return p.order.Field.toCursor(pn) } -func (p *provisionedNetworkPager) applyCursors(query *ProvisionedNetworkQuery, after, before *Cursor) *ProvisionedNetworkQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultProvisionedNetworkOrder.Field.field, - ) { +func (p *provisionednetworkPager) applyCursors(query *ProvisionedNetworkQuery, after, before *Cursor) (*ProvisionedNetworkQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultProvisionedNetworkOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *provisionedNetworkPager) applyOrder(query *ProvisionedNetworkQuery, reverse bool) *ProvisionedNetworkQuery { +func (p *provisionednetworkPager) applyOrder(query *ProvisionedNetworkQuery) *ProvisionedNetworkQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultProvisionedNetworkOrder.Field { - query = query.Order(direction.orderFunc(DefaultProvisionedNetworkOrder.Field.field)) + query = query.Order(DefaultProvisionedNetworkOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *provisionednetworkPager) orderExpr(query *ProvisionedNetworkQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { 
+ query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultProvisionedNetworkOrder.Field { + b.Comma().Ident(DefaultProvisionedNetworkOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to ProvisionedNetwork. func (pn *ProvisionedNetworkQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -6275,98 +6691,54 @@ func (pn *ProvisionedNetworkQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newProvisionedNetworkPager(opts) + pager, err := newProvisionedNetworkPager(opts, last != nil) if err != nil { return nil, err } - if pn, err = pager.applyFilter(pn); err != nil { return nil, err } - conn := &ProvisionedNetworkConnection{Edges: []*ProvisionedNetworkEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := pn.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = pn.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - return conn, nil } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := pn.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count - } - - pn = pager.applyCursors(pn, after, before) - pn = pager.applyOrder(pn, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - pn = pn.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - pn = pn.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := pn.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if pn, err = pager.applyCursors(pn, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *ProvisionedNetwork - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *ProvisionedNetwork { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *ProvisionedNetwork { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + pn.Limit(limit) } - - conn.Edges = make([]*ProvisionedNetworkEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &ProvisionedNetworkEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := pn.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, 
nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + pn = pager.applyOrder(pn) + nodes, err := pn.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // ProvisionedNetworkOrderField defines the ordering field of ProvisionedNetwork. type ProvisionedNetworkOrderField struct { - field string + // Value extracts the ordering value from the given ProvisionedNetwork. + Value func(*ProvisionedNetwork) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) provisionednetwork.OrderOption toCursor func(*ProvisionedNetwork) Cursor } @@ -6378,9 +6750,13 @@ type ProvisionedNetworkOrder struct { // DefaultProvisionedNetworkOrder is the default ordering of ProvisionedNetwork. var DefaultProvisionedNetworkOrder = &ProvisionedNetworkOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &ProvisionedNetworkOrderField{ - field: provisionednetwork.FieldID, + Value: func(pn *ProvisionedNetwork) (ent.Value, error) { + return pn.ID, nil + }, + column: provisionednetwork.FieldID, + toTerm: provisionednetwork.ByID, toCursor: func(pn *ProvisionedNetwork) Cursor { return Cursor{ID: pn.ID} }, @@ -6411,8 +6787,46 @@ type ProvisioningStepConnection struct { TotalCount int `json:"totalCount"` } +func (c *ProvisioningStepConnection) build(nodes []*ProvisioningStep, pager *provisioningstepPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *ProvisioningStep + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *ProvisioningStep { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *ProvisioningStep { + return nodes[i] + } + } + c.Edges = make([]*ProvisioningStepEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &ProvisioningStepEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // ProvisioningStepPaginateOption enables pagination customization. -type ProvisioningStepPaginateOption func(*provisioningStepPager) error +type ProvisioningStepPaginateOption func(*provisioningstepPager) error // WithProvisioningStepOrder configures pagination ordering. func WithProvisioningStepOrder(order *ProvisioningStepOrder) ProvisioningStepPaginateOption { @@ -6420,7 +6834,7 @@ func WithProvisioningStepOrder(order *ProvisioningStepOrder) ProvisioningStepPag order = DefaultProvisioningStepOrder } o := *order - return func(pager *provisioningStepPager) error { + return func(pager *provisioningstepPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -6434,7 +6848,7 @@ func WithProvisioningStepOrder(order *ProvisioningStepOrder) ProvisioningStepPag // WithProvisioningStepFilter configures pagination filter. 
func WithProvisioningStepFilter(filter func(*ProvisioningStepQuery) (*ProvisioningStepQuery, error)) ProvisioningStepPaginateOption { - return func(pager *provisioningStepPager) error { + return func(pager *provisioningstepPager) error { if filter == nil { return errors.New("ProvisioningStepQuery filter cannot be nil") } @@ -6443,13 +6857,14 @@ func WithProvisioningStepFilter(filter func(*ProvisioningStepQuery) (*Provisioni } } -type provisioningStepPager struct { - order *ProvisioningStepOrder - filter func(*ProvisioningStepQuery) (*ProvisioningStepQuery, error) +type provisioningstepPager struct { + reverse bool + order *ProvisioningStepOrder + filter func(*ProvisioningStepQuery) (*ProvisioningStepQuery, error) } -func newProvisioningStepPager(opts []ProvisioningStepPaginateOption) (*provisioningStepPager, error) { - pager := &provisioningStepPager{} +func newProvisioningStepPager(opts []ProvisioningStepPaginateOption, reverse bool) (*provisioningstepPager, error) { + pager := &provisioningstepPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -6461,39 +6876,59 @@ func newProvisioningStepPager(opts []ProvisioningStepPaginateOption) (*provision return pager, nil } -func (p *provisioningStepPager) applyFilter(query *ProvisioningStepQuery) (*ProvisioningStepQuery, error) { +func (p *provisioningstepPager) applyFilter(query *ProvisioningStepQuery) (*ProvisioningStepQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *provisioningStepPager) toCursor(ps *ProvisioningStep) Cursor { +func (p *provisioningstepPager) toCursor(ps *ProvisioningStep) Cursor { return p.order.Field.toCursor(ps) } -func (p *provisioningStepPager) applyCursors(query *ProvisioningStepQuery, after, before *Cursor) *ProvisioningStepQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultProvisioningStepOrder.Field.field, - ) { +func (p *provisioningstepPager) applyCursors(query *ProvisioningStepQuery, after, before *Cursor) (*ProvisioningStepQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultProvisioningStepOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *provisioningStepPager) applyOrder(query *ProvisioningStepQuery, reverse bool) *ProvisioningStepQuery { +func (p *provisioningstepPager) applyOrder(query *ProvisioningStepQuery) *ProvisioningStepQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultProvisioningStepOrder.Field { - query = query.Order(direction.orderFunc(DefaultProvisioningStepOrder.Field.field)) + query = query.Order(DefaultProvisioningStepOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *provisioningstepPager) orderExpr(query *ProvisioningStepQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + 
b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultProvisioningStepOrder.Field { + b.Comma().Ident(DefaultProvisioningStepOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to ProvisioningStep. func (ps *ProvisioningStepQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -6502,98 +6937,54 @@ func (ps *ProvisioningStepQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newProvisioningStepPager(opts) + pager, err := newProvisioningStepPager(opts, last != nil) if err != nil { return nil, err } - if ps, err = pager.applyFilter(ps); err != nil { return nil, err } - conn := &ProvisioningStepConnection{Edges: []*ProvisioningStepEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := ps.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = ps.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := ps.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count - } - - ps = pager.applyCursors(ps, after, before) - ps = pager.applyOrder(ps, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - ps = ps.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - ps = ps.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := ps.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err - } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *ProvisioningStep - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *ProvisioningStep { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *ProvisioningStep { - return nodes[i] + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } } - - conn.Edges = make([]*ProvisioningStepEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &ProvisioningStepEdge{ - Node: node, - Cursor: pager.toCursor(node), + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil + } + if ps, err = pager.applyCursors(ps, after, before); err != nil { + return nil, err + } + if limit := paginateLimit(first, last); limit != 0 { + ps.Limit(limit) + } + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := ps.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - 
conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + ps = pager.applyOrder(ps) + nodes, err := ps.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // ProvisioningStepOrderField defines the ordering field of ProvisioningStep. type ProvisioningStepOrderField struct { - field string + // Value extracts the ordering value from the given ProvisioningStep. + Value func(*ProvisioningStep) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) provisioningstep.OrderOption toCursor func(*ProvisioningStep) Cursor } @@ -6605,9 +6996,13 @@ type ProvisioningStepOrder struct { // DefaultProvisioningStepOrder is the default ordering of ProvisioningStep. var DefaultProvisioningStepOrder = &ProvisioningStepOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &ProvisioningStepOrderField{ - field: provisioningstep.FieldID, + Value: func(ps *ProvisioningStep) (ent.Value, error) { + return ps.ID, nil + }, + column: provisioningstep.FieldID, + toTerm: provisioningstep.ByID, toCursor: func(ps *ProvisioningStep) Cursor { return Cursor{ID: ps.ID} }, @@ -6638,8 +7033,46 @@ type RepoCommitConnection struct { TotalCount int `json:"totalCount"` } +func (c *RepoCommitConnection) build(nodes []*RepoCommit, pager *repocommitPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *RepoCommit + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *RepoCommit { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *RepoCommit { + return nodes[i] + } + } + c.Edges = make([]*RepoCommitEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &RepoCommitEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // RepoCommitPaginateOption enables pagination customization. -type RepoCommitPaginateOption func(*repoCommitPager) error +type RepoCommitPaginateOption func(*repocommitPager) error // WithRepoCommitOrder configures pagination ordering. func WithRepoCommitOrder(order *RepoCommitOrder) RepoCommitPaginateOption { @@ -6647,7 +7080,7 @@ func WithRepoCommitOrder(order *RepoCommitOrder) RepoCommitPaginateOption { order = DefaultRepoCommitOrder } o := *order - return func(pager *repoCommitPager) error { + return func(pager *repocommitPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -6661,7 +7094,7 @@ func WithRepoCommitOrder(order *RepoCommitOrder) RepoCommitPaginateOption { // WithRepoCommitFilter configures pagination filter. 
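// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: every rewritten Paginate in
// this file leans on the same over-fetch trick. paginateLimit requests one row
// more than first (or last), and Connection.build trims that extra row while
// setting HasNextPage (or HasPreviousPage). A standalone sketch of that
// bookkeeping with hypothetical names, independent of the generated types:
package resolvers // hypothetical package for the sketch

// pageTrim reports whether an extra row was fetched beyond the requested page
// size and returns how many nodes should actually be exposed as edges.
func pageTrim(requested, fetched int) (hasMore bool, keep int) {
	if fetched == requested+1 {
		// The sentinel row only proves another page exists; drop it.
		return true, fetched - 1
	}
	return false, fetched
}
// ---------------------------------------------------------------------------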
func WithRepoCommitFilter(filter func(*RepoCommitQuery) (*RepoCommitQuery, error)) RepoCommitPaginateOption { - return func(pager *repoCommitPager) error { + return func(pager *repocommitPager) error { if filter == nil { return errors.New("RepoCommitQuery filter cannot be nil") } @@ -6670,13 +7103,14 @@ func WithRepoCommitFilter(filter func(*RepoCommitQuery) (*RepoCommitQuery, error } } -type repoCommitPager struct { - order *RepoCommitOrder - filter func(*RepoCommitQuery) (*RepoCommitQuery, error) +type repocommitPager struct { + reverse bool + order *RepoCommitOrder + filter func(*RepoCommitQuery) (*RepoCommitQuery, error) } -func newRepoCommitPager(opts []RepoCommitPaginateOption) (*repoCommitPager, error) { - pager := &repoCommitPager{} +func newRepoCommitPager(opts []RepoCommitPaginateOption, reverse bool) (*repocommitPager, error) { + pager := &repocommitPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -6688,39 +7122,59 @@ func newRepoCommitPager(opts []RepoCommitPaginateOption) (*repoCommitPager, erro return pager, nil } -func (p *repoCommitPager) applyFilter(query *RepoCommitQuery) (*RepoCommitQuery, error) { +func (p *repocommitPager) applyFilter(query *RepoCommitQuery) (*RepoCommitQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *repoCommitPager) toCursor(rc *RepoCommit) Cursor { +func (p *repocommitPager) toCursor(rc *RepoCommit) Cursor { return p.order.Field.toCursor(rc) } -func (p *repoCommitPager) applyCursors(query *RepoCommitQuery, after, before *Cursor) *RepoCommitQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultRepoCommitOrder.Field.field, - ) { +func (p *repocommitPager) applyCursors(query *RepoCommitQuery, after, before *Cursor) (*RepoCommitQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultRepoCommitOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *repoCommitPager) applyOrder(query *RepoCommitQuery, reverse bool) *RepoCommitQuery { +func (p *repocommitPager) applyOrder(query *RepoCommitQuery) *RepoCommitQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultRepoCommitOrder.Field { - query = query.Order(direction.orderFunc(DefaultRepoCommitOrder.Field.field)) + query = query.Order(DefaultRepoCommitOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *repocommitPager) orderExpr(query *RepoCommitQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultRepoCommitOrder.Field { + b.Comma().Ident(DefaultRepoCommitOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to RepoCommit. 
func (rc *RepoCommitQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -6729,98 +7183,54 @@ func (rc *RepoCommitQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newRepoCommitPager(opts) + pager, err := newRepoCommitPager(opts, last != nil) if err != nil { return nil, err } - if rc, err = pager.applyFilter(rc); err != nil { return nil, err } - conn := &RepoCommitConnection{Edges: []*RepoCommitEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := rc.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = rc.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := rc.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - rc = pager.applyCursors(rc, after, before) - rc = pager.applyOrder(rc, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - rc = rc.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - rc = rc.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := rc.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if rc, err = pager.applyCursors(rc, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *RepoCommit - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *RepoCommit { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *RepoCommit { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + rc.Limit(limit) } - - conn.Edges = make([]*RepoCommitEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &RepoCommitEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := rc.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + rc = pager.applyOrder(rc) + nodes, err := rc.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // RepoCommitOrderField defines the ordering field of RepoCommit. 
type RepoCommitOrderField struct { - field string + // Value extracts the ordering value from the given RepoCommit. + Value func(*RepoCommit) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) repocommit.OrderOption toCursor func(*RepoCommit) Cursor } @@ -6832,9 +7242,13 @@ type RepoCommitOrder struct { // DefaultRepoCommitOrder is the default ordering of RepoCommit. var DefaultRepoCommitOrder = &RepoCommitOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &RepoCommitOrderField{ - field: repocommit.FieldID, + Value: func(rc *RepoCommit) (ent.Value, error) { + return rc.ID, nil + }, + column: repocommit.FieldID, + toTerm: repocommit.ByID, toCursor: func(rc *RepoCommit) Cursor { return Cursor{ID: rc.ID} }, @@ -6865,6 +7279,44 @@ type RepositoryConnection struct { TotalCount int `json:"totalCount"` } +func (c *RepositoryConnection) build(nodes []*Repository, pager *repositoryPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Repository + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Repository { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Repository { + return nodes[i] + } + } + c.Edges = make([]*RepositoryEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &RepositoryEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // RepositoryPaginateOption enables pagination customization. 
type RepositoryPaginateOption func(*repositoryPager) error @@ -6898,12 +7350,13 @@ func WithRepositoryFilter(filter func(*RepositoryQuery) (*RepositoryQuery, error } type repositoryPager struct { - order *RepositoryOrder - filter func(*RepositoryQuery) (*RepositoryQuery, error) + reverse bool + order *RepositoryOrder + filter func(*RepositoryQuery) (*RepositoryQuery, error) } -func newRepositoryPager(opts []RepositoryPaginateOption) (*repositoryPager, error) { - pager := &repositoryPager{} +func newRepositoryPager(opts []RepositoryPaginateOption, reverse bool) (*repositoryPager, error) { + pager := &repositoryPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -6926,28 +7379,48 @@ func (p *repositoryPager) toCursor(r *Repository) Cursor { return p.order.Field.toCursor(r) } -func (p *repositoryPager) applyCursors(query *RepositoryQuery, after, before *Cursor) *RepositoryQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultRepositoryOrder.Field.field, - ) { +func (p *repositoryPager) applyCursors(query *RepositoryQuery, after, before *Cursor) (*RepositoryQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultRepositoryOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *repositoryPager) applyOrder(query *RepositoryQuery, reverse bool) *RepositoryQuery { +func (p *repositoryPager) applyOrder(query *RepositoryQuery) *RepositoryQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultRepositoryOrder.Field { - query = query.Order(direction.orderFunc(DefaultRepositoryOrder.Field.field)) + query = query.Order(DefaultRepositoryOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *repositoryPager) orderExpr(query *RepositoryQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultRepositoryOrder.Field { + b.Comma().Ident(DefaultRepositoryOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Repository. 
func (r *RepositoryQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -6956,98 +7429,54 @@ func (r *RepositoryQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newRepositoryPager(opts) + pager, err := newRepositoryPager(opts, last != nil) if err != nil { return nil, err } - if r, err = pager.applyFilter(r); err != nil { return nil, err } - conn := &RepositoryConnection{Edges: []*RepositoryEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := r.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = r.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := r.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - r = pager.applyCursors(r, after, before) - r = pager.applyOrder(r, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - r = r.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - r = r.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := r.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if r, err = pager.applyCursors(r, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Repository - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Repository { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Repository { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + r.Limit(limit) } - - conn.Edges = make([]*RepositoryEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &RepositoryEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := r.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + r = pager.applyOrder(r) + nodes, err := r.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // RepositoryOrderField defines the ordering field of Repository. 
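// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: pagination options such as
// WithRepositoryFilter (unchanged above apart from the pager rename) compose
// with the regenerated Paginate. A sketch that narrows the result set before
// paginating; import path, package name, and helper name are assumptions.
package resolvers // hypothetical caller-side package

import (
	"context"

	"github.com/gen0cide/laforge/ent" // assumed location of the generated package
)

// pagedRepositories applies a caller-supplied restriction to the query before
// the cursor predicates and ordering are added.
func pagedRepositories(ctx context.Context, client *ent.Client, first int) (*ent.RepositoryConnection, error) {
	return client.Repository.Query().
		Paginate(ctx, nil, &first, nil, nil,
			ent.WithRepositoryFilter(func(q *ent.RepositoryQuery) (*ent.RepositoryQuery, error) {
				// Any narrowing can happen here, e.g. q.Where(...) with
				// predicates from the generated repository subpackage.
				return q, nil
			}),
		)
}
// ---------------------------------------------------------------------------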
type RepositoryOrderField struct { - field string + // Value extracts the ordering value from the given Repository. + Value func(*Repository) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) repository.OrderOption toCursor func(*Repository) Cursor } @@ -7059,9 +7488,13 @@ type RepositoryOrder struct { // DefaultRepositoryOrder is the default ordering of Repository. var DefaultRepositoryOrder = &RepositoryOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &RepositoryOrderField{ - field: repository.FieldID, + Value: func(r *Repository) (ent.Value, error) { + return r.ID, nil + }, + column: repository.FieldID, + toTerm: repository.ByID, toCursor: func(r *Repository) Cursor { return Cursor{ID: r.ID} }, @@ -7092,6 +7525,44 @@ type ScriptConnection struct { TotalCount int `json:"totalCount"` } +func (c *ScriptConnection) build(nodes []*Script, pager *scriptPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Script + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Script { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Script { + return nodes[i] + } + } + c.Edges = make([]*ScriptEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &ScriptEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // ScriptPaginateOption enables pagination customization. 
type ScriptPaginateOption func(*scriptPager) error @@ -7125,12 +7596,13 @@ func WithScriptFilter(filter func(*ScriptQuery) (*ScriptQuery, error)) ScriptPag } type scriptPager struct { - order *ScriptOrder - filter func(*ScriptQuery) (*ScriptQuery, error) + reverse bool + order *ScriptOrder + filter func(*ScriptQuery) (*ScriptQuery, error) } -func newScriptPager(opts []ScriptPaginateOption) (*scriptPager, error) { - pager := &scriptPager{} +func newScriptPager(opts []ScriptPaginateOption, reverse bool) (*scriptPager, error) { + pager := &scriptPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -7153,28 +7625,48 @@ func (p *scriptPager) toCursor(s *Script) Cursor { return p.order.Field.toCursor(s) } -func (p *scriptPager) applyCursors(query *ScriptQuery, after, before *Cursor) *ScriptQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultScriptOrder.Field.field, - ) { +func (p *scriptPager) applyCursors(query *ScriptQuery, after, before *Cursor) (*ScriptQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultScriptOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *scriptPager) applyOrder(query *ScriptQuery, reverse bool) *ScriptQuery { +func (p *scriptPager) applyOrder(query *ScriptQuery) *ScriptQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultScriptOrder.Field { - query = query.Order(direction.orderFunc(DefaultScriptOrder.Field.field)) + query = query.Order(DefaultScriptOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *scriptPager) orderExpr(query *ScriptQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultScriptOrder.Field { + b.Comma().Ident(DefaultScriptOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Script. 
func (s *ScriptQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -7183,98 +7675,54 @@ func (s *ScriptQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newScriptPager(opts) + pager, err := newScriptPager(opts, last != nil) if err != nil { return nil, err - } - - if s, err = pager.applyFilter(s); err != nil { - return nil, err - } - - conn := &ScriptConnection{Edges: []*ScriptEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := s.Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := s.Clone().Count(ctx) - if err != nil { - return nil, err - } - conn.TotalCount = count - } - - s = pager.applyCursors(s, after, before) - s = pager.applyOrder(s, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - s = s.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - s = s.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := s.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err - } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] - } - - var nodeAt func(int) *Script - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Script { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Script { - return nodes[i] + } + if s, err = pager.applyFilter(s); err != nil { + return nil, err + } + conn := &ScriptConnection{Edges: []*ScriptEdge{}} + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = s.Clone().Count(ctx); err != nil { + return nil, err + } + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } } - - conn.Edges = make([]*ScriptEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &ScriptEdge{ - Node: node, - Cursor: pager.toCursor(node), + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil + } + if s, err = pager.applyCursors(s, after, before); err != nil { + return nil, err + } + if limit := paginateLimit(first, last); limit != 0 { + s.Limit(limit) + } + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := s.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + s = pager.applyOrder(s) + nodes, err := s.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // 
ScriptOrderField defines the ordering field of Script. type ScriptOrderField struct { - field string + // Value extracts the ordering value from the given Script. + Value func(*Script) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) script.OrderOption toCursor func(*Script) Cursor } @@ -7286,9 +7734,13 @@ type ScriptOrder struct { // DefaultScriptOrder is the default ordering of Script. var DefaultScriptOrder = &ScriptOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &ScriptOrderField{ - field: script.FieldID, + Value: func(s *Script) (ent.Value, error) { + return s.ID, nil + }, + column: script.FieldID, + toTerm: script.ByID, toCursor: func(s *Script) Cursor { return Cursor{ID: s.ID} }, @@ -7319,8 +7771,46 @@ type ServerTaskConnection struct { TotalCount int `json:"totalCount"` } +func (c *ServerTaskConnection) build(nodes []*ServerTask, pager *servertaskPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *ServerTask + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *ServerTask { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *ServerTask { + return nodes[i] + } + } + c.Edges = make([]*ServerTaskEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &ServerTaskEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // ServerTaskPaginateOption enables pagination customization. -type ServerTaskPaginateOption func(*serverTaskPager) error +type ServerTaskPaginateOption func(*servertaskPager) error // WithServerTaskOrder configures pagination ordering. func WithServerTaskOrder(order *ServerTaskOrder) ServerTaskPaginateOption { @@ -7328,7 +7818,7 @@ func WithServerTaskOrder(order *ServerTaskOrder) ServerTaskPaginateOption { order = DefaultServerTaskOrder } o := *order - return func(pager *serverTaskPager) error { + return func(pager *servertaskPager) error { if err := o.Direction.Validate(); err != nil { return err } @@ -7342,7 +7832,7 @@ func WithServerTaskOrder(order *ServerTaskOrder) ServerTaskPaginateOption { // WithServerTaskFilter configures pagination filter. 
func WithServerTaskFilter(filter func(*ServerTaskQuery) (*ServerTaskQuery, error)) ServerTaskPaginateOption { - return func(pager *serverTaskPager) error { + return func(pager *servertaskPager) error { if filter == nil { return errors.New("ServerTaskQuery filter cannot be nil") } @@ -7351,13 +7841,14 @@ func WithServerTaskFilter(filter func(*ServerTaskQuery) (*ServerTaskQuery, error } } -type serverTaskPager struct { - order *ServerTaskOrder - filter func(*ServerTaskQuery) (*ServerTaskQuery, error) +type servertaskPager struct { + reverse bool + order *ServerTaskOrder + filter func(*ServerTaskQuery) (*ServerTaskQuery, error) } -func newServerTaskPager(opts []ServerTaskPaginateOption) (*serverTaskPager, error) { - pager := &serverTaskPager{} +func newServerTaskPager(opts []ServerTaskPaginateOption, reverse bool) (*servertaskPager, error) { + pager := &servertaskPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -7369,39 +7860,59 @@ func newServerTaskPager(opts []ServerTaskPaginateOption) (*serverTaskPager, erro return pager, nil } -func (p *serverTaskPager) applyFilter(query *ServerTaskQuery) (*ServerTaskQuery, error) { +func (p *servertaskPager) applyFilter(query *ServerTaskQuery) (*ServerTaskQuery, error) { if p.filter != nil { return p.filter(query) } return query, nil } -func (p *serverTaskPager) toCursor(st *ServerTask) Cursor { +func (p *servertaskPager) toCursor(st *ServerTask) Cursor { return p.order.Field.toCursor(st) } -func (p *serverTaskPager) applyCursors(query *ServerTaskQuery, after, before *Cursor) *ServerTaskQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultServerTaskOrder.Field.field, - ) { +func (p *servertaskPager) applyCursors(query *ServerTaskQuery, after, before *Cursor) (*ServerTaskQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultServerTaskOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *serverTaskPager) applyOrder(query *ServerTaskQuery, reverse bool) *ServerTaskQuery { +func (p *servertaskPager) applyOrder(query *ServerTaskQuery) *ServerTaskQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultServerTaskOrder.Field { - query = query.Order(direction.orderFunc(DefaultServerTaskOrder.Field.field)) + query = query.Order(DefaultServerTaskOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *servertaskPager) orderExpr(query *ServerTaskQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultServerTaskOrder.Field { + b.Comma().Ident(DefaultServerTaskOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to ServerTask. 
func (st *ServerTaskQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -7410,98 +7921,54 @@ func (st *ServerTaskQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newServerTaskPager(opts) + pager, err := newServerTaskPager(opts, last != nil) if err != nil { return nil, err } - if st, err = pager.applyFilter(st); err != nil { return nil, err } - conn := &ServerTaskConnection{Edges: []*ServerTaskEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := st.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = st.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := st.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - st = pager.applyCursors(st, after, before) - st = pager.applyOrder(st, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - st = st.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - st = st.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := st.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if st, err = pager.applyCursors(st, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *ServerTask - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *ServerTask { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *ServerTask { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + st.Limit(limit) } - - conn.Edges = make([]*ServerTaskEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &ServerTaskEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := st.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + st = pager.applyOrder(st) + nodes, err := st.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // ServerTaskOrderField defines the ordering field of ServerTask. 
type ServerTaskOrderField struct { - field string + // Value extracts the ordering value from the given ServerTask. + Value func(*ServerTask) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) servertask.OrderOption toCursor func(*ServerTask) Cursor } @@ -7513,9 +7980,13 @@ type ServerTaskOrder struct { // DefaultServerTaskOrder is the default ordering of ServerTask. var DefaultServerTaskOrder = &ServerTaskOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &ServerTaskOrderField{ - field: servertask.FieldID, + Value: func(st *ServerTask) (ent.Value, error) { + return st.ID, nil + }, + column: servertask.FieldID, + toTerm: servertask.ByID, toCursor: func(st *ServerTask) Cursor { return Cursor{ID: st.ID} }, @@ -7546,6 +8017,44 @@ type StatusConnection struct { TotalCount int `json:"totalCount"` } +func (c *StatusConnection) build(nodes []*Status, pager *statusPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Status + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Status { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Status { + return nodes[i] + } + } + c.Edges = make([]*StatusEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &StatusEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // StatusPaginateOption enables pagination customization. 
type StatusPaginateOption func(*statusPager) error @@ -7579,12 +8088,13 @@ func WithStatusFilter(filter func(*StatusQuery) (*StatusQuery, error)) StatusPag } type statusPager struct { - order *StatusOrder - filter func(*StatusQuery) (*StatusQuery, error) + reverse bool + order *StatusOrder + filter func(*StatusQuery) (*StatusQuery, error) } -func newStatusPager(opts []StatusPaginateOption) (*statusPager, error) { - pager := &statusPager{} +func newStatusPager(opts []StatusPaginateOption, reverse bool) (*statusPager, error) { + pager := &statusPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -7607,28 +8117,48 @@ func (p *statusPager) toCursor(s *Status) Cursor { return p.order.Field.toCursor(s) } -func (p *statusPager) applyCursors(query *StatusQuery, after, before *Cursor) *StatusQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultStatusOrder.Field.field, - ) { +func (p *statusPager) applyCursors(query *StatusQuery, after, before *Cursor) (*StatusQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultStatusOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *statusPager) applyOrder(query *StatusQuery, reverse bool) *StatusQuery { +func (p *statusPager) applyOrder(query *StatusQuery) *StatusQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultStatusOrder.Field { - query = query.Order(direction.orderFunc(DefaultStatusOrder.Field.field)) + query = query.Order(DefaultStatusOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *statusPager) orderExpr(query *StatusQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultStatusOrder.Field { + b.Comma().Ident(DefaultStatusOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Status. 
func (s *StatusQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -7637,98 +8167,54 @@ func (s *StatusQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newStatusPager(opts) + pager, err := newStatusPager(opts, last != nil) if err != nil { return nil, err } - if s, err = pager.applyFilter(s); err != nil { return nil, err } - conn := &StatusConnection{Edges: []*StatusEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := s.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = s.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := s.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - s = pager.applyCursors(s, after, before) - s = pager.applyOrder(s, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - s = s.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - s = s.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := s.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if s, err = pager.applyCursors(s, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Status - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Status { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Status { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + s.Limit(limit) } - - conn.Edges = make([]*StatusEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &StatusEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := s.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + s = pager.applyOrder(s) + nodes, err := s.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // StatusOrderField defines the ordering field of Status. type StatusOrderField struct { - field string + // Value extracts the ordering value from the given Status. 
+ Value func(*Status) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) status.OrderOption toCursor func(*Status) Cursor } @@ -7740,9 +8226,13 @@ type StatusOrder struct { // DefaultStatusOrder is the default ordering of Status. var DefaultStatusOrder = &StatusOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &StatusOrderField{ - field: status.FieldID, + Value: func(s *Status) (ent.Value, error) { + return s.ID, nil + }, + column: status.FieldID, + toTerm: status.ByID, toCursor: func(s *Status) Cursor { return Cursor{ID: s.ID} }, @@ -7773,6 +8263,44 @@ type TagConnection struct { TotalCount int `json:"totalCount"` } +func (c *TagConnection) build(nodes []*Tag, pager *tagPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Tag + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Tag { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Tag { + return nodes[i] + } + } + c.Edges = make([]*TagEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &TagEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // TagPaginateOption enables pagination customization. type TagPaginateOption func(*tagPager) error @@ -7806,12 +8334,13 @@ func WithTagFilter(filter func(*TagQuery) (*TagQuery, error)) TagPaginateOption } type tagPager struct { - order *TagOrder - filter func(*TagQuery) (*TagQuery, error) + reverse bool + order *TagOrder + filter func(*TagQuery) (*TagQuery, error) } -func newTagPager(opts []TagPaginateOption) (*tagPager, error) { - pager := &tagPager{} +func newTagPager(opts []TagPaginateOption, reverse bool) (*tagPager, error) { + pager := &tagPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -7834,28 +8363,48 @@ func (p *tagPager) toCursor(t *Tag) Cursor { return p.order.Field.toCursor(t) } -func (p *tagPager) applyCursors(query *TagQuery, after, before *Cursor) *TagQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultTagOrder.Field.field, - ) { +func (p *tagPager) applyCursors(query *TagQuery, after, before *Cursor) (*TagQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultTagOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *tagPager) applyOrder(query *TagQuery, reverse bool) *TagQuery { +func (p *tagPager) applyOrder(query *TagQuery) *TagQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultTagOrder.Field { - query = 
query.Order(direction.orderFunc(DefaultTagOrder.Field.field)) + query = query.Order(DefaultTagOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *tagPager) orderExpr(query *TagQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultTagOrder.Field { + b.Comma().Ident(DefaultTagOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Tag. func (t *TagQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -7864,98 +8413,54 @@ func (t *TagQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newTagPager(opts) + pager, err := newTagPager(opts, last != nil) if err != nil { return nil, err } - if t, err = pager.applyFilter(t); err != nil { return nil, err } - conn := &TagConnection{Edges: []*TagEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := t.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = t.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := t.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - t = pager.applyCursors(t, after, before) - t = pager.applyOrder(t, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - t = t.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - t = t.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := t.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - var nodeAt func(int) *Tag - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Tag { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Tag { - return nodes[i] - } + if t, err = pager.applyCursors(t, after, before); err != nil { + return nil, err } - - conn.Edges = make([]*TagEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &TagEdge{ - Node: node, - Cursor: pager.toCursor(node), + if limit := paginateLimit(first, 
last); limit != 0 { + t.Limit(limit) + } + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := t.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + t = pager.applyOrder(t) + nodes, err := t.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // TagOrderField defines the ordering field of Tag. type TagOrderField struct { - field string + // Value extracts the ordering value from the given Tag. + Value func(*Tag) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) tag.OrderOption toCursor func(*Tag) Cursor } @@ -7967,9 +8472,13 @@ type TagOrder struct { // DefaultTagOrder is the default ordering of Tag. var DefaultTagOrder = &TagOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &TagOrderField{ - field: tag.FieldID, + Value: func(t *Tag) (ent.Value, error) { + return t.ID, nil + }, + column: tag.FieldID, + toTerm: tag.ByID, toCursor: func(t *Tag) Cursor { return Cursor{ID: t.ID} }, @@ -8000,6 +8509,44 @@ type TeamConnection struct { TotalCount int `json:"totalCount"` } +func (c *TeamConnection) build(nodes []*Team, pager *teamPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Team + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Team { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Team { + return nodes[i] + } + } + c.Edges = make([]*TeamEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &TeamEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // TeamPaginateOption enables pagination customization. 
type TeamPaginateOption func(*teamPager) error @@ -8033,12 +8580,13 @@ func WithTeamFilter(filter func(*TeamQuery) (*TeamQuery, error)) TeamPaginateOpt } type teamPager struct { - order *TeamOrder - filter func(*TeamQuery) (*TeamQuery, error) + reverse bool + order *TeamOrder + filter func(*TeamQuery) (*TeamQuery, error) } -func newTeamPager(opts []TeamPaginateOption) (*teamPager, error) { - pager := &teamPager{} +func newTeamPager(opts []TeamPaginateOption, reverse bool) (*teamPager, error) { + pager := &teamPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -8061,28 +8609,48 @@ func (p *teamPager) toCursor(t *Team) Cursor { return p.order.Field.toCursor(t) } -func (p *teamPager) applyCursors(query *TeamQuery, after, before *Cursor) *TeamQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultTeamOrder.Field.field, - ) { +func (p *teamPager) applyCursors(query *TeamQuery, after, before *Cursor) (*TeamQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultTeamOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *teamPager) applyOrder(query *TeamQuery, reverse bool) *TeamQuery { +func (p *teamPager) applyOrder(query *TeamQuery) *TeamQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultTeamOrder.Field { - query = query.Order(direction.orderFunc(DefaultTeamOrder.Field.field)) + query = query.Order(DefaultTeamOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *teamPager) orderExpr(query *TeamQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultTeamOrder.Field { + b.Comma().Ident(DefaultTeamOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Team. 
func (t *TeamQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -8091,98 +8659,54 @@ func (t *TeamQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newTeamPager(opts) + pager, err := newTeamPager(opts, last != nil) if err != nil { return nil, err } - if t, err = pager.applyFilter(t); err != nil { return nil, err } - conn := &TeamConnection{Edges: []*TeamEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := t.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = t.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := t.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - t = pager.applyCursors(t, after, before) - t = pager.applyOrder(t, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - t = t.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - t = t.collectField(graphql.GetOperationContext(ctx), *field) } - - nodes, err := t.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if t, err = pager.applyCursors(t, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Team - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Team { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Team { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + t.Limit(limit) } - - conn.Edges = make([]*TeamEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &TeamEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := t.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + t = pager.applyOrder(t) + nodes, err := t.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // TeamOrderField defines the ordering field of Team. type TeamOrderField struct { - field string + // Value extracts the ordering value from the given Team. 
+ Value func(*Team) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) team.OrderOption toCursor func(*Team) Cursor } @@ -8194,9 +8718,13 @@ type TeamOrder struct { // DefaultTeamOrder is the default ordering of Team. var DefaultTeamOrder = &TeamOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &TeamOrderField{ - field: team.FieldID, + Value: func(t *Team) (ent.Value, error) { + return t.ID, nil + }, + column: team.FieldID, + toTerm: team.ByID, toCursor: func(t *Team) Cursor { return Cursor{ID: t.ID} }, @@ -8227,6 +8755,44 @@ type TokenConnection struct { TotalCount int `json:"totalCount"` } +func (c *TokenConnection) build(nodes []*Token, pager *tokenPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *Token + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *Token { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *Token { + return nodes[i] + } + } + c.Edges = make([]*TokenEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &TokenEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // TokenPaginateOption enables pagination customization. type TokenPaginateOption func(*tokenPager) error @@ -8260,12 +8826,13 @@ func WithTokenFilter(filter func(*TokenQuery) (*TokenQuery, error)) TokenPaginat } type tokenPager struct { - order *TokenOrder - filter func(*TokenQuery) (*TokenQuery, error) + reverse bool + order *TokenOrder + filter func(*TokenQuery) (*TokenQuery, error) } -func newTokenPager(opts []TokenPaginateOption) (*tokenPager, error) { - pager := &tokenPager{} +func newTokenPager(opts []TokenPaginateOption, reverse bool) (*tokenPager, error) { + pager := &tokenPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -8288,28 +8855,48 @@ func (p *tokenPager) toCursor(t *Token) Cursor { return p.order.Field.toCursor(t) } -func (p *tokenPager) applyCursors(query *TokenQuery, after, before *Cursor) *TokenQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultTokenOrder.Field.field, - ) { +func (p *tokenPager) applyCursors(query *TokenQuery, after, before *Cursor) (*TokenQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultTokenOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *tokenPager) applyOrder(query *TokenQuery, reverse bool) *TokenQuery { +func (p *tokenPager) applyOrder(query *TokenQuery) *TokenQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != 
DefaultTokenOrder.Field { - query = query.Order(direction.orderFunc(DefaultTokenOrder.Field.field)) + query = query.Order(DefaultTokenOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *tokenPager) orderExpr(query *TokenQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultTokenOrder.Field { + b.Comma().Ident(DefaultTokenOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to Token. func (t *TokenQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -8318,98 +8905,54 @@ func (t *TokenQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newTokenPager(opts) + pager, err := newTokenPager(opts, last != nil) if err != nil { return nil, err } - if t, err = pager.applyFilter(t); err != nil { return nil, err } - conn := &TokenConnection{Edges: []*TokenEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := t.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = t.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := t.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - t = pager.applyCursors(t, after, before) - t = pager.applyOrder(t, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 - } - if limit > 0 { - t = t.Limit(limit) } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - t = t.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := t.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if t, err = pager.applyCursors(t, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *Token - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *Token { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *Token { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + t.Limit(limit) } - - conn.Edges = make([]*TokenEdge, len(nodes)) - for i := range 
nodes { - node := nodeAt(i) - conn.Edges[i] = &TokenEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := t.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + t = pager.applyOrder(t) + nodes, err := t.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // TokenOrderField defines the ordering field of Token. type TokenOrderField struct { - field string + // Value extracts the ordering value from the given Token. + Value func(*Token) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) token.OrderOption toCursor func(*Token) Cursor } @@ -8421,9 +8964,13 @@ type TokenOrder struct { // DefaultTokenOrder is the default ordering of Token. var DefaultTokenOrder = &TokenOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &TokenOrderField{ - field: token.FieldID, + Value: func(t *Token) (ent.Value, error) { + return t.ID, nil + }, + column: token.FieldID, + toTerm: token.ByID, toCursor: func(t *Token) Cursor { return Cursor{ID: t.ID} }, @@ -8454,6 +9001,44 @@ type UserConnection struct { TotalCount int `json:"totalCount"` } +func (c *UserConnection) build(nodes []*User, pager *userPager, after *Cursor, first *int, before *Cursor, last *int) { + c.PageInfo.HasNextPage = before != nil + c.PageInfo.HasPreviousPage = after != nil + if first != nil && *first+1 == len(nodes) { + c.PageInfo.HasNextPage = true + nodes = nodes[:len(nodes)-1] + } else if last != nil && *last+1 == len(nodes) { + c.PageInfo.HasPreviousPage = true + nodes = nodes[:len(nodes)-1] + } + var nodeAt func(int) *User + if last != nil { + n := len(nodes) - 1 + nodeAt = func(i int) *User { + return nodes[n-i] + } + } else { + nodeAt = func(i int) *User { + return nodes[i] + } + } + c.Edges = make([]*UserEdge, len(nodes)) + for i := range nodes { + node := nodeAt(i) + c.Edges[i] = &UserEdge{ + Node: node, + Cursor: pager.toCursor(node), + } + } + if l := len(c.Edges); l > 0 { + c.PageInfo.StartCursor = &c.Edges[0].Cursor + c.PageInfo.EndCursor = &c.Edges[l-1].Cursor + } + if c.TotalCount == 0 { + c.TotalCount = len(nodes) + } +} + // UserPaginateOption enables pagination customization. 
type UserPaginateOption func(*userPager) error @@ -8487,12 +9072,13 @@ func WithUserFilter(filter func(*UserQuery) (*UserQuery, error)) UserPaginateOpt } type userPager struct { - order *UserOrder - filter func(*UserQuery) (*UserQuery, error) + reverse bool + order *UserOrder + filter func(*UserQuery) (*UserQuery, error) } -func newUserPager(opts []UserPaginateOption) (*userPager, error) { - pager := &userPager{} +func newUserPager(opts []UserPaginateOption, reverse bool) (*userPager, error) { + pager := &userPager{reverse: reverse} for _, opt := range opts { if err := opt(pager); err != nil { return nil, err @@ -8515,28 +9101,48 @@ func (p *userPager) toCursor(u *User) Cursor { return p.order.Field.toCursor(u) } -func (p *userPager) applyCursors(query *UserQuery, after, before *Cursor) *UserQuery { - for _, predicate := range cursorsToPredicates( - p.order.Direction, after, before, - p.order.Field.field, DefaultUserOrder.Field.field, - ) { +func (p *userPager) applyCursors(query *UserQuery, after, before *Cursor) (*UserQuery, error) { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + for _, predicate := range entgql.CursorsPredicate(after, before, DefaultUserOrder.Field.column, p.order.Field.column, direction) { query = query.Where(predicate) } - return query + return query, nil } -func (p *userPager) applyOrder(query *UserQuery, reverse bool) *UserQuery { +func (p *userPager) applyOrder(query *UserQuery) *UserQuery { direction := p.order.Direction - if reverse { - direction = direction.reverse() + if p.reverse { + direction = direction.Reverse() } - query = query.Order(direction.orderFunc(p.order.Field.field)) + query = query.Order(p.order.Field.toTerm(direction.OrderTermOption())) if p.order.Field != DefaultUserOrder.Field { - query = query.Order(direction.orderFunc(DefaultUserOrder.Field.field)) + query = query.Order(DefaultUserOrder.Field.toTerm(direction.OrderTermOption())) + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) } return query } +func (p *userPager) orderExpr(query *UserQuery) sql.Querier { + direction := p.order.Direction + if p.reverse { + direction = direction.Reverse() + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(p.order.Field.column) + } + return sql.ExprFunc(func(b *sql.Builder) { + b.Ident(p.order.Field.column).Pad().WriteString(string(direction)) + if p.order.Field != DefaultUserOrder.Field { + b.Comma().Ident(DefaultUserOrder.Field.column).Pad().WriteString(string(direction)) + } + }) +} + // Paginate executes the query and returns a relay based cursor connection to User. 
func (u *UserQuery) Paginate( ctx context.Context, after *Cursor, first *int, @@ -8545,98 +9151,54 @@ func (u *UserQuery) Paginate( if err := validateFirstLast(first, last); err != nil { return nil, err } - pager, err := newUserPager(opts) + pager, err := newUserPager(opts, last != nil) if err != nil { return nil, err } - if u, err = pager.applyFilter(u); err != nil { return nil, err } - conn := &UserConnection{Edges: []*UserEdge{}} - if !hasCollectedField(ctx, edgesField) || first != nil && *first == 0 || last != nil && *last == 0 { - if hasCollectedField(ctx, totalCountField) || - hasCollectedField(ctx, pageInfoField) { - count, err := u.Count(ctx) - if err != nil { + ignoredEdges := !hasCollectedField(ctx, edgesField) + if hasCollectedField(ctx, totalCountField) || hasCollectedField(ctx, pageInfoField) { + hasPagination := after != nil || first != nil || before != nil || last != nil + if hasPagination || ignoredEdges { + if conn.TotalCount, err = u.Clone().Count(ctx); err != nil { return nil, err } - conn.TotalCount = count - conn.PageInfo.HasNextPage = first != nil && count > 0 - conn.PageInfo.HasPreviousPage = last != nil && count > 0 - } - return conn, nil - } - - if (after != nil || first != nil || before != nil || last != nil) && hasCollectedField(ctx, totalCountField) { - count, err := u.Clone().Count(ctx) - if err != nil { - return nil, err + conn.PageInfo.HasNextPage = first != nil && conn.TotalCount > 0 + conn.PageInfo.HasPreviousPage = last != nil && conn.TotalCount > 0 } - conn.TotalCount = count - } - - u = pager.applyCursors(u, after, before) - u = pager.applyOrder(u, last != nil) - var limit int - if first != nil { - limit = *first + 1 - } else if last != nil { - limit = *last + 1 } - if limit > 0 { - u = u.Limit(limit) - } - - if field := getCollectedField(ctx, edgesField, nodeField); field != nil { - u = u.collectField(graphql.GetOperationContext(ctx), *field) - } - - nodes, err := u.All(ctx) - if err != nil || len(nodes) == 0 { - return conn, err + if ignoredEdges || (first != nil && *first == 0) || (last != nil && *last == 0) { + return conn, nil } - - if len(nodes) == limit { - conn.PageInfo.HasNextPage = first != nil - conn.PageInfo.HasPreviousPage = last != nil - nodes = nodes[:len(nodes)-1] + if u, err = pager.applyCursors(u, after, before); err != nil { + return nil, err } - - var nodeAt func(int) *User - if last != nil { - n := len(nodes) - 1 - nodeAt = func(i int) *User { - return nodes[n-i] - } - } else { - nodeAt = func(i int) *User { - return nodes[i] - } + if limit := paginateLimit(first, last); limit != 0 { + u.Limit(limit) } - - conn.Edges = make([]*UserEdge, len(nodes)) - for i := range nodes { - node := nodeAt(i) - conn.Edges[i] = &UserEdge{ - Node: node, - Cursor: pager.toCursor(node), + if field := collectedField(ctx, edgesField, nodeField); field != nil { + if err := u.collectField(ctx, graphql.GetOperationContext(ctx), *field, []string{edgesField, nodeField}); err != nil { + return nil, err } } - - conn.PageInfo.StartCursor = &conn.Edges[0].Cursor - conn.PageInfo.EndCursor = &conn.Edges[len(conn.Edges)-1].Cursor - if conn.TotalCount == 0 { - conn.TotalCount = len(nodes) + u = pager.applyOrder(u) + nodes, err := u.All(ctx) + if err != nil { + return nil, err } - + conn.build(nodes, pager, after, first, before, last) return conn, nil } // UserOrderField defines the ordering field of User. type UserOrderField struct { - field string + // Value extracts the ordering value from the given User. 
+ Value func(*User) (ent.Value, error) + column string // field or computed. + toTerm func(...sql.OrderTermOption) user.OrderOption toCursor func(*User) Cursor } @@ -8648,9 +9210,13 @@ type UserOrder struct { // DefaultUserOrder is the default ordering of User. var DefaultUserOrder = &UserOrder{ - Direction: OrderDirectionAsc, + Direction: entgql.OrderDirectionAsc, Field: &UserOrderField{ - field: user.FieldID, + Value: func(u *User) (ent.Value, error) { + return u.ID, nil + }, + column: user.FieldID, + toTerm: user.ByID, toCursor: func(u *User) Cursor { return Cursor{ID: u.ID} }, diff --git a/ent/gql_transaction.go b/ent/gql_transaction.go index 5fb5120d..96117136 100644 --- a/ent/gql_transaction.go +++ b/ent/gql_transaction.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent diff --git a/ent/hook/hook.go b/ent/hook/hook.go index 2b31c122..da2caa58 100755 --- a/ent/hook/hook.go +++ b/ent/hook/hook.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package hook @@ -15,11 +15,10 @@ type AdhocPlanFunc func(context.Context, *ent.AdhocPlanMutation) (ent.Value, err // Mutate calls f(ctx, m). func (f AdhocPlanFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AdhocPlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AdhocPlanMutation", m) + if mv, ok := m.(*ent.AdhocPlanMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AdhocPlanMutation", m) } // The AgentStatusFunc type is an adapter to allow the use of ordinary @@ -28,11 +27,10 @@ type AgentStatusFunc func(context.Context, *ent.AgentStatusMutation) (ent.Value, // Mutate calls f(ctx, m). func (f AgentStatusFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AgentStatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AgentStatusMutation", m) + if mv, ok := m.(*ent.AgentStatusMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AgentStatusMutation", m) } // The AgentTaskFunc type is an adapter to allow the use of ordinary @@ -41,11 +39,10 @@ type AgentTaskFunc func(context.Context, *ent.AgentTaskMutation) (ent.Value, err // Mutate calls f(ctx, m). func (f AgentTaskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AgentTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AgentTaskMutation", m) + if mv, ok := m.(*ent.AgentTaskMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AgentTaskMutation", m) } // The AnsibleFunc type is an adapter to allow the use of ordinary @@ -54,11 +51,10 @@ type AnsibleFunc func(context.Context, *ent.AnsibleMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f AnsibleFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AnsibleMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AnsibleMutation", m) + if mv, ok := m.(*ent.AnsibleMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.AnsibleMutation", m) } // The AuthUserFunc type is an adapter to allow the use of ordinary @@ -67,11 +63,10 @@ type AuthUserFunc func(context.Context, *ent.AuthUserMutation) (ent.Value, error // Mutate calls f(ctx, m). func (f AuthUserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AuthUserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthUserMutation", m) + if mv, ok := m.(*ent.AuthUserMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthUserMutation", m) } // The BuildFunc type is an adapter to allow the use of ordinary @@ -80,11 +75,10 @@ type BuildFunc func(context.Context, *ent.BuildMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f BuildFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.BuildMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BuildMutation", m) + if mv, ok := m.(*ent.BuildMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BuildMutation", m) } // The BuildCommitFunc type is an adapter to allow the use of ordinary @@ -93,11 +87,10 @@ type BuildCommitFunc func(context.Context, *ent.BuildCommitMutation) (ent.Value, // Mutate calls f(ctx, m). func (f BuildCommitFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.BuildCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BuildCommitMutation", m) + if mv, ok := m.(*ent.BuildCommitMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BuildCommitMutation", m) } // The CommandFunc type is an adapter to allow the use of ordinary @@ -106,11 +99,10 @@ type CommandFunc func(context.Context, *ent.CommandMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f CommandFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.CommandMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CommandMutation", m) + if mv, ok := m.(*ent.CommandMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CommandMutation", m) } // The CompetitionFunc type is an adapter to allow the use of ordinary @@ -119,11 +111,10 @@ type CompetitionFunc func(context.Context, *ent.CompetitionMutation) (ent.Value, // Mutate calls f(ctx, m). func (f CompetitionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.CompetitionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CompetitionMutation", m) + if mv, ok := m.(*ent.CompetitionMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CompetitionMutation", m) } // The DNSFunc type is an adapter to allow the use of ordinary @@ -132,11 +123,10 @@ type DNSFunc func(context.Context, *ent.DNSMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f DNSFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.DNSMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.DNSMutation", m) + if mv, ok := m.(*ent.DNSMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DNSMutation", m) } // The DNSRecordFunc type is an adapter to allow the use of ordinary @@ -145,11 +135,10 @@ type DNSRecordFunc func(context.Context, *ent.DNSRecordMutation) (ent.Value, err // Mutate calls f(ctx, m). func (f DNSRecordFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.DNSRecordMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DNSRecordMutation", m) + if mv, ok := m.(*ent.DNSRecordMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DNSRecordMutation", m) } // The DiskFunc type is an adapter to allow the use of ordinary @@ -158,11 +147,10 @@ type DiskFunc func(context.Context, *ent.DiskMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f DiskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.DiskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DiskMutation", m) + if mv, ok := m.(*ent.DiskMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DiskMutation", m) } // The EnvironmentFunc type is an adapter to allow the use of ordinary @@ -171,11 +159,10 @@ type EnvironmentFunc func(context.Context, *ent.EnvironmentMutation) (ent.Value, // Mutate calls f(ctx, m). func (f EnvironmentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.EnvironmentMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EnvironmentMutation", m) + if mv, ok := m.(*ent.EnvironmentMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EnvironmentMutation", m) } // The FileDeleteFunc type is an adapter to allow the use of ordinary @@ -184,11 +171,10 @@ type FileDeleteFunc func(context.Context, *ent.FileDeleteMutation) (ent.Value, e // Mutate calls f(ctx, m). func (f FileDeleteFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.FileDeleteMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileDeleteMutation", m) + if mv, ok := m.(*ent.FileDeleteMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileDeleteMutation", m) } // The FileDownloadFunc type is an adapter to allow the use of ordinary @@ -197,11 +183,10 @@ type FileDownloadFunc func(context.Context, *ent.FileDownloadMutation) (ent.Valu // Mutate calls f(ctx, m). func (f FileDownloadFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.FileDownloadMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileDownloadMutation", m) + if mv, ok := m.(*ent.FileDownloadMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileDownloadMutation", m) } // The FileExtractFunc type is an adapter to allow the use of ordinary @@ -210,11 +195,10 @@ type FileExtractFunc func(context.Context, *ent.FileExtractMutation) (ent.Value, // Mutate calls f(ctx, m). 
func (f FileExtractFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.FileExtractMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileExtractMutation", m) + if mv, ok := m.(*ent.FileExtractMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FileExtractMutation", m) } // The FindingFunc type is an adapter to allow the use of ordinary @@ -223,11 +207,10 @@ type FindingFunc func(context.Context, *ent.FindingMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f FindingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.FindingMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FindingMutation", m) + if mv, ok := m.(*ent.FindingMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FindingMutation", m) } // The GinFileMiddlewareFunc type is an adapter to allow the use of ordinary @@ -236,11 +219,10 @@ type GinFileMiddlewareFunc func(context.Context, *ent.GinFileMiddlewareMutation) // Mutate calls f(ctx, m). func (f GinFileMiddlewareFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.GinFileMiddlewareMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GinFileMiddlewareMutation", m) + if mv, ok := m.(*ent.GinFileMiddlewareMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GinFileMiddlewareMutation", m) } // The HostFunc type is an adapter to allow the use of ordinary @@ -249,11 +231,10 @@ type HostFunc func(context.Context, *ent.HostMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f HostFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.HostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.HostMutation", m) + if mv, ok := m.(*ent.HostMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.HostMutation", m) } // The HostDependencyFunc type is an adapter to allow the use of ordinary @@ -262,11 +243,10 @@ type HostDependencyFunc func(context.Context, *ent.HostDependencyMutation) (ent. // Mutate calls f(ctx, m). func (f HostDependencyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.HostDependencyMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.HostDependencyMutation", m) + if mv, ok := m.(*ent.HostDependencyMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.HostDependencyMutation", m) } // The IdentityFunc type is an adapter to allow the use of ordinary @@ -275,11 +255,10 @@ type IdentityFunc func(context.Context, *ent.IdentityMutation) (ent.Value, error // Mutate calls f(ctx, m). func (f IdentityFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.IdentityMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IdentityMutation", m) + if mv, ok := m.(*ent.IdentityMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.IdentityMutation", m) } // The IncludedNetworkFunc type is an adapter to allow the use of ordinary @@ -288,11 +267,10 @@ type IncludedNetworkFunc func(context.Context, *ent.IncludedNetworkMutation) (en // Mutate calls f(ctx, m). func (f IncludedNetworkFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.IncludedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IncludedNetworkMutation", m) + if mv, ok := m.(*ent.IncludedNetworkMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IncludedNetworkMutation", m) } // The NetworkFunc type is an adapter to allow the use of ordinary @@ -301,11 +279,10 @@ type NetworkFunc func(context.Context, *ent.NetworkMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f NetworkFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.NetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NetworkMutation", m) + if mv, ok := m.(*ent.NetworkMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NetworkMutation", m) } // The PlanFunc type is an adapter to allow the use of ordinary @@ -314,11 +291,10 @@ type PlanFunc func(context.Context, *ent.PlanMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f PlanFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.PlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PlanMutation", m) + if mv, ok := m.(*ent.PlanMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PlanMutation", m) } // The PlanDiffFunc type is an adapter to allow the use of ordinary @@ -327,11 +303,10 @@ type PlanDiffFunc func(context.Context, *ent.PlanDiffMutation) (ent.Value, error // Mutate calls f(ctx, m). func (f PlanDiffFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.PlanDiffMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PlanDiffMutation", m) + if mv, ok := m.(*ent.PlanDiffMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PlanDiffMutation", m) } // The ProvisionedHostFunc type is an adapter to allow the use of ordinary @@ -340,11 +315,10 @@ type ProvisionedHostFunc func(context.Context, *ent.ProvisionedHostMutation) (en // Mutate calls f(ctx, m). func (f ProvisionedHostFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ProvisionedHostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProvisionedHostMutation", m) + if mv, ok := m.(*ent.ProvisionedHostMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProvisionedHostMutation", m) } // The ProvisionedNetworkFunc type is an adapter to allow the use of ordinary @@ -353,11 +327,10 @@ type ProvisionedNetworkFunc func(context.Context, *ent.ProvisionedNetworkMutatio // Mutate calls f(ctx, m). func (f ProvisionedNetworkFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ProvisionedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.ProvisionedNetworkMutation", m) + if mv, ok := m.(*ent.ProvisionedNetworkMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProvisionedNetworkMutation", m) } // The ProvisioningStepFunc type is an adapter to allow the use of ordinary @@ -366,11 +339,10 @@ type ProvisioningStepFunc func(context.Context, *ent.ProvisioningStepMutation) ( // Mutate calls f(ctx, m). func (f ProvisioningStepFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ProvisioningStepMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProvisioningStepMutation", m) + if mv, ok := m.(*ent.ProvisioningStepMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProvisioningStepMutation", m) } // The RepoCommitFunc type is an adapter to allow the use of ordinary @@ -379,11 +351,10 @@ type RepoCommitFunc func(context.Context, *ent.RepoCommitMutation) (ent.Value, e // Mutate calls f(ctx, m). func (f RepoCommitFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.RepoCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RepoCommitMutation", m) + if mv, ok := m.(*ent.RepoCommitMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RepoCommitMutation", m) } // The RepositoryFunc type is an adapter to allow the use of ordinary @@ -392,11 +363,10 @@ type RepositoryFunc func(context.Context, *ent.RepositoryMutation) (ent.Value, e // Mutate calls f(ctx, m). func (f RepositoryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.RepositoryMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RepositoryMutation", m) + if mv, ok := m.(*ent.RepositoryMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RepositoryMutation", m) } // The ScriptFunc type is an adapter to allow the use of ordinary @@ -405,11 +375,10 @@ type ScriptFunc func(context.Context, *ent.ScriptMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f ScriptFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ScriptMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ScriptMutation", m) + if mv, ok := m.(*ent.ScriptMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ScriptMutation", m) } // The ServerTaskFunc type is an adapter to allow the use of ordinary @@ -418,11 +387,10 @@ type ServerTaskFunc func(context.Context, *ent.ServerTaskMutation) (ent.Value, e // Mutate calls f(ctx, m). func (f ServerTaskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ServerTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ServerTaskMutation", m) + if mv, ok := m.(*ent.ServerTaskMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ServerTaskMutation", m) } // The StatusFunc type is an adapter to allow the use of ordinary @@ -431,11 +399,10 @@ type StatusFunc func(context.Context, *ent.StatusMutation) (ent.Value, error) // Mutate calls f(ctx, m). 
func (f StatusFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.StatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.StatusMutation", m) + if mv, ok := m.(*ent.StatusMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.StatusMutation", m) } // The TagFunc type is an adapter to allow the use of ordinary @@ -444,11 +411,10 @@ type TagFunc func(context.Context, *ent.TagMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f TagFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.TagMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TagMutation", m) + if mv, ok := m.(*ent.TagMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TagMutation", m) } // The TeamFunc type is an adapter to allow the use of ordinary @@ -457,11 +423,10 @@ type TeamFunc func(context.Context, *ent.TeamMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f TeamFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.TeamMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TeamMutation", m) + if mv, ok := m.(*ent.TeamMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TeamMutation", m) } // The TokenFunc type is an adapter to allow the use of ordinary @@ -470,11 +435,10 @@ type TokenFunc func(context.Context, *ent.TokenMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f TokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.TokenMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TokenMutation", m) + if mv, ok := m.(*ent.TokenMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TokenMutation", m) } // The UserFunc type is an adapter to allow the use of ordinary @@ -483,11 +447,10 @@ type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.UserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) + if mv, ok := m.(*ent.UserMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) } // Condition is a hook condition function. @@ -585,7 +548,6 @@ func HasFields(field string, fields ...string) Condition { // If executes the given hook under condition. // // hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) -// func If(hk ent.Hook, cond Condition) ent.Hook { return func(next ent.Mutator) ent.Mutator { return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { @@ -600,7 +562,6 @@ func If(hk ent.Hook, cond Condition) ent.Hook { // On executes the given hook only for the given operation. // // hook.On(Log, ent.Delete|ent.Create) -// func On(hk ent.Hook, op ent.Op) ent.Hook { return If(hk, HasOp(op)) } @@ -608,7 +569,6 @@ func On(hk ent.Hook, op ent.Op) ent.Hook { // Unless skips the given hook only for the given operation. 
// // hook.Unless(Log, ent.Update|ent.UpdateOne) -// func Unless(hk ent.Hook, op ent.Op) ent.Hook { return If(hk, Not(HasOp(op))) } @@ -629,7 +589,6 @@ func FixedError(err error) ent.Hook { // Reject(ent.Delete|ent.Update), // } // } -// func Reject(op ent.Op) ent.Hook { hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) return On(hk, op) diff --git a/ent/host.go b/ent/host.go index e59cc615..2b5f1055 100755 --- a/ent/host.go +++ b/ent/host.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/disk" "github.com/gen0cide/laforge/ent/environment" @@ -19,8 +20,8 @@ type Host struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Hostname holds the value of the "hostname" field. Hostname string `json:"hostname,omitempty" hcl:"hostname,attr"` // Description holds the value of the "description" field. @@ -51,6 +52,7 @@ type Host struct { // The values are being populated by the HostQuery when eager-loading is set. Edges HostEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // HostToDisk holds the value of the HostToDisk edge. HCLHostToDisk *Disk `json:"HostToDisk,omitempty" hcl:"disk,block"` @@ -64,8 +66,9 @@ type Host struct { HCLDependOnHostToHostDependency []*HostDependency `json:"DependOnHostToHostDependency,omitempty" hcl:"depends_on,block"` // DependByHostToHostDependency holds the value of the DependByHostToHostDependency edge. HCLDependByHostToHostDependency []*HostDependency `json:"DependByHostToHostDependency,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_host *uuid.UUID + selectValues sql.SelectValues } // HostEdges holds the relations/edges for other nodes in the graph. @@ -85,6 +88,13 @@ type HostEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [6]bool + // totalCount holds the count of the edges above. + totalCount [6]map[string]int + + namedHostToUser map[string][]*User + namedHostToIncludedNetwork map[string][]*IncludedNetwork + namedDependOnHostToHostDependency map[string][]*HostDependency + namedDependByHostToHostDependency map[string][]*HostDependency } // HostToDiskOrErr returns the HostToDisk value or an error if the edge @@ -92,8 +102,7 @@ type HostEdges struct { func (e HostEdges) HostToDiskOrErr() (*Disk, error) { if e.loadedTypes[0] { if e.HostToDisk == nil { - // The edge HostToDisk was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: disk.Label} } return e.HostToDisk, nil @@ -115,8 +124,7 @@ func (e HostEdges) HostToUserOrErr() ([]*User, error) { func (e HostEdges) HostToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[2] { if e.HostToEnvironment == nil { - // The edge HostToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: environment.Label} } return e.HostToEnvironment, nil @@ -152,8 +160,8 @@ func (e HostEdges) DependByHostToHostDependencyOrErr() ([]*HostDependency, error } // scanValues returns the types for scanning values from sql.Rows. -func (*Host) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Host) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case host.FieldExposedTCPPorts, host.FieldExposedUDPPorts, host.FieldVars, host.FieldUserGroups, host.FieldProvisionSteps, host.FieldTags: @@ -162,14 +170,14 @@ func (*Host) scanValues(columns []string) ([]interface{}, error) { values[i] = new(sql.NullBool) case host.FieldLastOctet: values[i] = new(sql.NullInt64) - case host.FieldHclID, host.FieldHostname, host.FieldDescription, host.FieldOS, host.FieldInstanceSize, host.FieldOverridePassword: + case host.FieldHCLID, host.FieldHostname, host.FieldDescription, host.FieldOS, host.FieldInstanceSize, host.FieldOverridePassword: values[i] = new(sql.NullString) case host.FieldID: values[i] = new(uuid.UUID) case host.ForeignKeys[0]: // environment_environment_to_host values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Host", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -177,7 +185,7 @@ func (*Host) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Host fields. -func (h *Host) assignValues(columns []string, values []interface{}) error { +func (h *Host) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -189,11 +197,11 @@ func (h *Host) assignValues(columns []string, values []interface{}) error { } else if value != nil { h.ID = *value } - case host.FieldHclID: + case host.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - h.HclID = value.String + h.HCLID = value.String } case host.FieldHostname: if value, ok := values[i].(*sql.NullString); !ok { @@ -292,56 +300,64 @@ func (h *Host) assignValues(columns []string, values []interface{}) error { h.environment_environment_to_host = new(uuid.UUID) *h.environment_environment_to_host = *value.S.(*uuid.UUID) } + default: + h.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Host. +// This includes values selected through modifiers, order, etc. +func (h *Host) Value(name string) (ent.Value, error) { + return h.selectValues.Get(name) +} + // QueryHostToDisk queries the "HostToDisk" edge of the Host entity. func (h *Host) QueryHostToDisk() *DiskQuery { - return (&HostClient{config: h.config}).QueryHostToDisk(h) + return NewHostClient(h.config).QueryHostToDisk(h) } // QueryHostToUser queries the "HostToUser" edge of the Host entity. func (h *Host) QueryHostToUser() *UserQuery { - return (&HostClient{config: h.config}).QueryHostToUser(h) + return NewHostClient(h.config).QueryHostToUser(h) } // QueryHostToEnvironment queries the "HostToEnvironment" edge of the Host entity. 
func (h *Host) QueryHostToEnvironment() *EnvironmentQuery { - return (&HostClient{config: h.config}).QueryHostToEnvironment(h) + return NewHostClient(h.config).QueryHostToEnvironment(h) } // QueryHostToIncludedNetwork queries the "HostToIncludedNetwork" edge of the Host entity. func (h *Host) QueryHostToIncludedNetwork() *IncludedNetworkQuery { - return (&HostClient{config: h.config}).QueryHostToIncludedNetwork(h) + return NewHostClient(h.config).QueryHostToIncludedNetwork(h) } // QueryDependOnHostToHostDependency queries the "DependOnHostToHostDependency" edge of the Host entity. func (h *Host) QueryDependOnHostToHostDependency() *HostDependencyQuery { - return (&HostClient{config: h.config}).QueryDependOnHostToHostDependency(h) + return NewHostClient(h.config).QueryDependOnHostToHostDependency(h) } // QueryDependByHostToHostDependency queries the "DependByHostToHostDependency" edge of the Host entity. func (h *Host) QueryDependByHostToHostDependency() *HostDependencyQuery { - return (&HostClient{config: h.config}).QueryDependByHostToHostDependency(h) + return NewHostClient(h.config).QueryDependByHostToHostDependency(h) } // Update returns a builder for updating this Host. // Note that you need to call Host.Unwrap() before calling this method if this Host // was returned from a transaction, and the transaction was committed or rolled back. func (h *Host) Update() *HostUpdateOne { - return (&HostClient{config: h.config}).UpdateOne(h) + return NewHostClient(h.config).UpdateOne(h) } // Unwrap unwraps the Host entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (h *Host) Unwrap() *Host { - tx, ok := h.config.driver.(*txDriver) + _tx, ok := h.config.driver.(*txDriver) if !ok { panic("ent: Host is not a transactional entity") } - h.config.driver = tx.drv + h.config.driver = _tx.drv return h } @@ -349,44 +365,147 @@ func (h *Host) Unwrap() *Host { func (h *Host) String() string { var builder strings.Builder builder.WriteString("Host(") - builder.WriteString(fmt.Sprintf("id=%v", h.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(h.HclID) - builder.WriteString(", hostname=") + builder.WriteString(fmt.Sprintf("id=%v, ", h.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(h.HCLID) + builder.WriteString(", ") + builder.WriteString("hostname=") builder.WriteString(h.Hostname) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(h.Description) - builder.WriteString(", OS=") + builder.WriteString(", ") + builder.WriteString("OS=") builder.WriteString(h.OS) - builder.WriteString(", last_octet=") + builder.WriteString(", ") + builder.WriteString("last_octet=") builder.WriteString(fmt.Sprintf("%v", h.LastOctet)) - builder.WriteString(", instance_size=") + builder.WriteString(", ") + builder.WriteString("instance_size=") builder.WriteString(h.InstanceSize) - builder.WriteString(", allow_mac_changes=") + builder.WriteString(", ") + builder.WriteString("allow_mac_changes=") builder.WriteString(fmt.Sprintf("%v", h.AllowMACChanges)) - builder.WriteString(", exposed_tcp_ports=") + builder.WriteString(", ") + builder.WriteString("exposed_tcp_ports=") builder.WriteString(fmt.Sprintf("%v", h.ExposedTCPPorts)) - builder.WriteString(", exposed_udp_ports=") + builder.WriteString(", ") + builder.WriteString("exposed_udp_ports=") builder.WriteString(fmt.Sprintf("%v", h.ExposedUDPPorts)) - builder.WriteString(", 
override_password=") + builder.WriteString(", ") + builder.WriteString("override_password=") builder.WriteString(h.OverridePassword) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", h.Vars)) - builder.WriteString(", user_groups=") + builder.WriteString(", ") + builder.WriteString("user_groups=") builder.WriteString(fmt.Sprintf("%v", h.UserGroups)) - builder.WriteString(", provision_steps=") + builder.WriteString(", ") + builder.WriteString("provision_steps=") builder.WriteString(fmt.Sprintf("%v", h.ProvisionSteps)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", h.Tags)) builder.WriteByte(')') return builder.String() } -// Hosts is a parsable slice of Host. -type Hosts []*Host +// NamedHostToUser returns the HostToUser named value or an error if the edge was not +// loaded in eager-loading with this name. +func (h *Host) NamedHostToUser(name string) ([]*User, error) { + if h.Edges.namedHostToUser == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := h.Edges.namedHostToUser[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (h Hosts) config(cfg config) { - for _i := range h { - h[_i].config = cfg +func (h *Host) appendNamedHostToUser(name string, edges ...*User) { + if h.Edges.namedHostToUser == nil { + h.Edges.namedHostToUser = make(map[string][]*User) + } + if len(edges) == 0 { + h.Edges.namedHostToUser[name] = []*User{} + } else { + h.Edges.namedHostToUser[name] = append(h.Edges.namedHostToUser[name], edges...) } } + +// NamedHostToIncludedNetwork returns the HostToIncludedNetwork named value or an error if the edge was not +// loaded in eager-loading with this name. +func (h *Host) NamedHostToIncludedNetwork(name string) ([]*IncludedNetwork, error) { + if h.Edges.namedHostToIncludedNetwork == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := h.Edges.namedHostToIncludedNetwork[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (h *Host) appendNamedHostToIncludedNetwork(name string, edges ...*IncludedNetwork) { + if h.Edges.namedHostToIncludedNetwork == nil { + h.Edges.namedHostToIncludedNetwork = make(map[string][]*IncludedNetwork) + } + if len(edges) == 0 { + h.Edges.namedHostToIncludedNetwork[name] = []*IncludedNetwork{} + } else { + h.Edges.namedHostToIncludedNetwork[name] = append(h.Edges.namedHostToIncludedNetwork[name], edges...) + } +} + +// NamedDependOnHostToHostDependency returns the DependOnHostToHostDependency named value or an error if the edge was not +// loaded in eager-loading with this name. +func (h *Host) NamedDependOnHostToHostDependency(name string) ([]*HostDependency, error) { + if h.Edges.namedDependOnHostToHostDependency == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := h.Edges.namedDependOnHostToHostDependency[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (h *Host) appendNamedDependOnHostToHostDependency(name string, edges ...*HostDependency) { + if h.Edges.namedDependOnHostToHostDependency == nil { + h.Edges.namedDependOnHostToHostDependency = make(map[string][]*HostDependency) + } + if len(edges) == 0 { + h.Edges.namedDependOnHostToHostDependency[name] = []*HostDependency{} + } else { + h.Edges.namedDependOnHostToHostDependency[name] = append(h.Edges.namedDependOnHostToHostDependency[name], edges...) 
+ } +} + +// NamedDependByHostToHostDependency returns the DependByHostToHostDependency named value or an error if the edge was not +// loaded in eager-loading with this name. +func (h *Host) NamedDependByHostToHostDependency(name string) ([]*HostDependency, error) { + if h.Edges.namedDependByHostToHostDependency == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := h.Edges.namedDependByHostToHostDependency[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (h *Host) appendNamedDependByHostToHostDependency(name string, edges ...*HostDependency) { + if h.Edges.namedDependByHostToHostDependency == nil { + h.Edges.namedDependByHostToHostDependency = make(map[string][]*HostDependency) + } + if len(edges) == 0 { + h.Edges.namedDependByHostToHostDependency[name] = []*HostDependency{} + } else { + h.Edges.namedDependByHostToHostDependency[name] = append(h.Edges.namedDependByHostToHostDependency[name], edges...) + } +} + +// Hosts is a parsable slice of Host. +type Hosts []*Host diff --git a/ent/host/host.go b/ent/host/host.go index 9d65d5b1..66059d43 100755 --- a/ent/host/host.go +++ b/ent/host/host.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package host import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "host" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldHostname holds the string denoting the hostname field in the database. FieldHostname = "hostname" // FieldDescription holds the string denoting the description field in the database. @@ -98,7 +100,7 @@ const ( // Columns holds all SQL columns for host fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldHostname, FieldDescription, FieldOS, @@ -145,3 +147,163 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Host queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByHostname orders the results by the hostname field. +func ByHostname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHostname, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByOS orders the results by the OS field. +func ByOS(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOS, opts...).ToFunc() +} + +// ByLastOctet orders the results by the last_octet field. +func ByLastOctet(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastOctet, opts...).ToFunc() +} + +// ByInstanceSize orders the results by the instance_size field. 
+func ByInstanceSize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldInstanceSize, opts...).ToFunc() +} + +// ByAllowMACChanges orders the results by the allow_mac_changes field. +func ByAllowMACChanges(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAllowMACChanges, opts...).ToFunc() +} + +// ByOverridePassword orders the results by the override_password field. +func ByOverridePassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOverridePassword, opts...).ToFunc() +} + +// ByHostToDiskField orders the results by HostToDisk field. +func ByHostToDiskField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostToDiskStep(), sql.OrderByField(field, opts...)) + } +} + +// ByHostToUserCount orders the results by HostToUser count. +func ByHostToUserCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newHostToUserStep(), opts...) + } +} + +// ByHostToUser orders the results by HostToUser terms. +func ByHostToUser(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostToUserStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByHostToEnvironmentField orders the results by HostToEnvironment field. +func ByHostToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByHostToIncludedNetworkCount orders the results by HostToIncludedNetwork count. +func ByHostToIncludedNetworkCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newHostToIncludedNetworkStep(), opts...) + } +} + +// ByHostToIncludedNetwork orders the results by HostToIncludedNetwork terms. +func ByHostToIncludedNetwork(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostToIncludedNetworkStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByDependOnHostToHostDependencyCount orders the results by DependOnHostToHostDependency count. +func ByDependOnHostToHostDependencyCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDependOnHostToHostDependencyStep(), opts...) + } +} + +// ByDependOnHostToHostDependency orders the results by DependOnHostToHostDependency terms. +func ByDependOnHostToHostDependency(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDependOnHostToHostDependencyStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByDependByHostToHostDependencyCount orders the results by DependByHostToHostDependency count. +func ByDependByHostToHostDependencyCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDependByHostToHostDependencyStep(), opts...) + } +} + +// ByDependByHostToHostDependency orders the results by DependByHostToHostDependency terms. +func ByDependByHostToHostDependency(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDependByHostToHostDependencyStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newHostToDiskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostToDiskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, HostToDiskTable, HostToDiskColumn), + ) +} +func newHostToUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostToUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, HostToUserTable, HostToUserColumn), + ) +} +func newHostToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, HostToEnvironmentTable, HostToEnvironmentColumn), + ) +} +func newHostToIncludedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostToIncludedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, HostToIncludedNetworkTable, HostToIncludedNetworkPrimaryKey...), + ) +} +func newDependOnHostToHostDependencyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DependOnHostToHostDependencyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, DependOnHostToHostDependencyTable, DependOnHostToHostDependencyColumn), + ) +} +func newDependByHostToHostDependencyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DependByHostToHostDependencyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, DependByHostToHostDependencyTable, DependByHostToHostDependencyColumn), + ) +} diff --git a/ent/host/where.go b/ent/host/where.go index f4475aee..a6de4a99 100755 --- a/ent/host/where.go +++ b/ent/host/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package host @@ -11,911 +11,537 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Host(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Host(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Host { + return predicate.Host(sql.FieldEQ(FieldHCLID, v)) } // Hostname applies equality check predicate on the "hostname" field. It's identical to HostnameEQ. func Hostname(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldEQ(FieldHostname, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. func Description(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldEQ(FieldDescription, v)) } // OS applies equality check predicate on the "OS" field. It's identical to OSEQ. func OS(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldEQ(FieldOS, v)) } // LastOctet applies equality check predicate on the "last_octet" field. It's identical to LastOctetEQ. func LastOctet(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldEQ(FieldLastOctet, v)) } // InstanceSize applies equality check predicate on the "instance_size" field. It's identical to InstanceSizeEQ. func InstanceSize(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldEQ(FieldInstanceSize, v)) } // AllowMACChanges applies equality check predicate on the "allow_mac_changes" field. It's identical to AllowMACChangesEQ. func AllowMACChanges(v bool) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAllowMACChanges), v)) - }) + return predicate.Host(sql.FieldEQ(FieldAllowMACChanges, v)) } // OverridePassword applies equality check predicate on the "override_password" field. It's identical to OverridePasswordEQ. 
func OverridePassword(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldEQ(FieldOverridePassword, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Host { + return predicate.Host(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Host { + return predicate.Host(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Host { + return predicate.Host(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Host { + return predicate.Host(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Host { + return predicate.Host(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Host { + return predicate.Host(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Host { + return predicate.Host(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. 
+func HCLIDLTE(v string) predicate.Host { + return predicate.Host(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Host { + return predicate.Host(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Host { + return predicate.Host(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Host { + return predicate.Host(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Host { + return predicate.Host(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Host { + return predicate.Host(sql.FieldContainsFold(FieldHCLID, v)) } // HostnameEQ applies the EQ predicate on the "hostname" field. func HostnameEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldEQ(FieldHostname, v)) } // HostnameNEQ applies the NEQ predicate on the "hostname" field. func HostnameNEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldHostname, v)) } // HostnameIn applies the In predicate on the "hostname" field. func HostnameIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHostname), v...)) - }) + return predicate.Host(sql.FieldIn(FieldHostname, vs...)) } // HostnameNotIn applies the NotIn predicate on the "hostname" field. func HostnameNotIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". 
This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHostname), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldHostname, vs...)) } // HostnameGT applies the GT predicate on the "hostname" field. func HostnameGT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldGT(FieldHostname, v)) } // HostnameGTE applies the GTE predicate on the "hostname" field. func HostnameGTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldGTE(FieldHostname, v)) } // HostnameLT applies the LT predicate on the "hostname" field. func HostnameLT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldLT(FieldHostname, v)) } // HostnameLTE applies the LTE predicate on the "hostname" field. func HostnameLTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldLTE(FieldHostname, v)) } // HostnameContains applies the Contains predicate on the "hostname" field. func HostnameContains(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldContains(FieldHostname, v)) } // HostnameHasPrefix applies the HasPrefix predicate on the "hostname" field. func HostnameHasPrefix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldHasPrefix(FieldHostname, v)) } // HostnameHasSuffix applies the HasSuffix predicate on the "hostname" field. func HostnameHasSuffix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldHasSuffix(FieldHostname, v)) } // HostnameEqualFold applies the EqualFold predicate on the "hostname" field. func HostnameEqualFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldEqualFold(FieldHostname, v)) } // HostnameContainsFold applies the ContainsFold predicate on the "hostname" field. func HostnameContainsFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHostname), v)) - }) + return predicate.Host(sql.FieldContainsFold(FieldHostname, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. func DescriptionEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. 
func DescriptionIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Host(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. func DescriptionNotIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. func DescriptionHasSuffix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. func DescriptionEqualFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. 
func DescriptionContainsFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Host(sql.FieldContainsFold(FieldDescription, v)) } // OSEQ applies the EQ predicate on the "OS" field. func OSEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldEQ(FieldOS, v)) } // OSNEQ applies the NEQ predicate on the "OS" field. func OSNEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldOS, v)) } // OSIn applies the In predicate on the "OS" field. func OSIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldOS), v...)) - }) + return predicate.Host(sql.FieldIn(FieldOS, vs...)) } // OSNotIn applies the NotIn predicate on the "OS" field. func OSNotIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldOS), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldOS, vs...)) } // OSGT applies the GT predicate on the "OS" field. func OSGT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldGT(FieldOS, v)) } // OSGTE applies the GTE predicate on the "OS" field. func OSGTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldGTE(FieldOS, v)) } // OSLT applies the LT predicate on the "OS" field. func OSLT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldLT(FieldOS, v)) } // OSLTE applies the LTE predicate on the "OS" field. func OSLTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldLTE(FieldOS, v)) } // OSContains applies the Contains predicate on the "OS" field. func OSContains(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldContains(FieldOS, v)) } // OSHasPrefix applies the HasPrefix predicate on the "OS" field. func OSHasPrefix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldHasPrefix(FieldOS, v)) } // OSHasSuffix applies the HasSuffix predicate on the "OS" field. func OSHasSuffix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldHasSuffix(FieldOS, v)) } // OSEqualFold applies the EqualFold predicate on the "OS" field. 
func OSEqualFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldEqualFold(FieldOS, v)) } // OSContainsFold applies the ContainsFold predicate on the "OS" field. func OSContainsFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOS), v)) - }) + return predicate.Host(sql.FieldContainsFold(FieldOS, v)) } // LastOctetEQ applies the EQ predicate on the "last_octet" field. func LastOctetEQ(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldEQ(FieldLastOctet, v)) } // LastOctetNEQ applies the NEQ predicate on the "last_octet" field. func LastOctetNEQ(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldLastOctet, v)) } // LastOctetIn applies the In predicate on the "last_octet" field. func LastOctetIn(vs ...int) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLastOctet), v...)) - }) + return predicate.Host(sql.FieldIn(FieldLastOctet, vs...)) } // LastOctetNotIn applies the NotIn predicate on the "last_octet" field. func LastOctetNotIn(vs ...int) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLastOctet), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldLastOctet, vs...)) } // LastOctetGT applies the GT predicate on the "last_octet" field. func LastOctetGT(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldGT(FieldLastOctet, v)) } // LastOctetGTE applies the GTE predicate on the "last_octet" field. func LastOctetGTE(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldGTE(FieldLastOctet, v)) } // LastOctetLT applies the LT predicate on the "last_octet" field. func LastOctetLT(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldLT(FieldLastOctet, v)) } // LastOctetLTE applies the LTE predicate on the "last_octet" field. func LastOctetLTE(v int) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastOctet), v)) - }) + return predicate.Host(sql.FieldLTE(FieldLastOctet, v)) } // InstanceSizeEQ applies the EQ predicate on the "instance_size" field. func InstanceSizeEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldEQ(FieldInstanceSize, v)) } // InstanceSizeNEQ applies the NEQ predicate on the "instance_size" field. 
func InstanceSizeNEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldInstanceSize, v)) } // InstanceSizeIn applies the In predicate on the "instance_size" field. func InstanceSizeIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldInstanceSize), v...)) - }) + return predicate.Host(sql.FieldIn(FieldInstanceSize, vs...)) } // InstanceSizeNotIn applies the NotIn predicate on the "instance_size" field. func InstanceSizeNotIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldInstanceSize), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldInstanceSize, vs...)) } // InstanceSizeGT applies the GT predicate on the "instance_size" field. func InstanceSizeGT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldGT(FieldInstanceSize, v)) } // InstanceSizeGTE applies the GTE predicate on the "instance_size" field. func InstanceSizeGTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldGTE(FieldInstanceSize, v)) } // InstanceSizeLT applies the LT predicate on the "instance_size" field. func InstanceSizeLT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldLT(FieldInstanceSize, v)) } // InstanceSizeLTE applies the LTE predicate on the "instance_size" field. func InstanceSizeLTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldLTE(FieldInstanceSize, v)) } // InstanceSizeContains applies the Contains predicate on the "instance_size" field. func InstanceSizeContains(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldContains(FieldInstanceSize, v)) } // InstanceSizeHasPrefix applies the HasPrefix predicate on the "instance_size" field. func InstanceSizeHasPrefix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldHasPrefix(FieldInstanceSize, v)) } // InstanceSizeHasSuffix applies the HasSuffix predicate on the "instance_size" field. func InstanceSizeHasSuffix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldHasSuffix(FieldInstanceSize, v)) } // InstanceSizeEqualFold applies the EqualFold predicate on the "instance_size" field. 
func InstanceSizeEqualFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldEqualFold(FieldInstanceSize, v)) } // InstanceSizeContainsFold applies the ContainsFold predicate on the "instance_size" field. func InstanceSizeContainsFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldInstanceSize), v)) - }) + return predicate.Host(sql.FieldContainsFold(FieldInstanceSize, v)) } // AllowMACChangesEQ applies the EQ predicate on the "allow_mac_changes" field. func AllowMACChangesEQ(v bool) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAllowMACChanges), v)) - }) + return predicate.Host(sql.FieldEQ(FieldAllowMACChanges, v)) } // AllowMACChangesNEQ applies the NEQ predicate on the "allow_mac_changes" field. func AllowMACChangesNEQ(v bool) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAllowMACChanges), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldAllowMACChanges, v)) } // OverridePasswordEQ applies the EQ predicate on the "override_password" field. func OverridePasswordEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldEQ(FieldOverridePassword, v)) } // OverridePasswordNEQ applies the NEQ predicate on the "override_password" field. func OverridePasswordNEQ(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldNEQ(FieldOverridePassword, v)) } // OverridePasswordIn applies the In predicate on the "override_password" field. func OverridePasswordIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldOverridePassword), v...)) - }) + return predicate.Host(sql.FieldIn(FieldOverridePassword, vs...)) } // OverridePasswordNotIn applies the NotIn predicate on the "override_password" field. func OverridePasswordNotIn(vs ...string) predicate.Host { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Host(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldOverridePassword), v...)) - }) + return predicate.Host(sql.FieldNotIn(FieldOverridePassword, vs...)) } // OverridePasswordGT applies the GT predicate on the "override_password" field. func OverridePasswordGT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldGT(FieldOverridePassword, v)) } // OverridePasswordGTE applies the GTE predicate on the "override_password" field. 
func OverridePasswordGTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldGTE(FieldOverridePassword, v)) } // OverridePasswordLT applies the LT predicate on the "override_password" field. func OverridePasswordLT(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldLT(FieldOverridePassword, v)) } // OverridePasswordLTE applies the LTE predicate on the "override_password" field. func OverridePasswordLTE(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldLTE(FieldOverridePassword, v)) } // OverridePasswordContains applies the Contains predicate on the "override_password" field. func OverridePasswordContains(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldContains(FieldOverridePassword, v)) } // OverridePasswordHasPrefix applies the HasPrefix predicate on the "override_password" field. func OverridePasswordHasPrefix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldHasPrefix(FieldOverridePassword, v)) } // OverridePasswordHasSuffix applies the HasSuffix predicate on the "override_password" field. func OverridePasswordHasSuffix(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldHasSuffix(FieldOverridePassword, v)) } // OverridePasswordEqualFold applies the EqualFold predicate on the "override_password" field. func OverridePasswordEqualFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldEqualFold(FieldOverridePassword, v)) } // OverridePasswordContainsFold applies the ContainsFold predicate on the "override_password" field. func OverridePasswordContainsFold(v string) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOverridePassword), v)) - }) + return predicate.Host(sql.FieldContainsFold(FieldOverridePassword, v)) } // ProvisionStepsIsNil applies the IsNil predicate on the "provision_steps" field. func ProvisionStepsIsNil() predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldProvisionSteps))) - }) + return predicate.Host(sql.FieldIsNull(FieldProvisionSteps)) } // ProvisionStepsNotNil applies the NotNil predicate on the "provision_steps" field. func ProvisionStepsNotNil() predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldProvisionSteps))) - }) + return predicate.Host(sql.FieldNotNull(FieldProvisionSteps)) } // HasHostToDisk applies the HasEdge predicate on the "HostToDisk" edge. 
@@ -923,7 +549,6 @@ func HasHostToDisk() predicate.Host { return predicate.Host(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToDiskTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, HostToDiskTable, HostToDiskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -933,11 +558,7 @@ func HasHostToDisk() predicate.Host { // HasHostToDiskWith applies the HasEdge predicate on the "HostToDisk" edge with a given conditions (other predicates). func HasHostToDiskWith(preds ...predicate.Disk) predicate.Host { return predicate.Host(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToDiskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, HostToDiskTable, HostToDiskColumn), - ) + step := newHostToDiskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -951,7 +572,6 @@ func HasHostToUser() predicate.Host { return predicate.Host(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToUserTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, HostToUserTable, HostToUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -961,11 +581,7 @@ func HasHostToUser() predicate.Host { // HasHostToUserWith applies the HasEdge predicate on the "HostToUser" edge with a given conditions (other predicates). func HasHostToUserWith(preds ...predicate.User) predicate.Host { return predicate.Host(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, HostToUserTable, HostToUserColumn), - ) + step := newHostToUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -979,7 +595,6 @@ func HasHostToEnvironment() predicate.Host { return predicate.Host(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, HostToEnvironmentTable, HostToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -989,11 +604,7 @@ func HasHostToEnvironment() predicate.Host { // HasHostToEnvironmentWith applies the HasEdge predicate on the "HostToEnvironment" edge with a given conditions (other predicates). func HasHostToEnvironmentWith(preds ...predicate.Environment) predicate.Host { return predicate.Host(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, HostToEnvironmentTable, HostToEnvironmentColumn), - ) + step := newHostToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1007,7 +618,6 @@ func HasHostToIncludedNetwork() predicate.Host { return predicate.Host(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToIncludedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, HostToIncludedNetworkTable, HostToIncludedNetworkPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -1017,11 +627,7 @@ func HasHostToIncludedNetwork() predicate.Host { // HasHostToIncludedNetworkWith applies the HasEdge predicate on the "HostToIncludedNetwork" edge with a given conditions (other predicates). 
func HasHostToIncludedNetworkWith(preds ...predicate.IncludedNetwork) predicate.Host { return predicate.Host(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostToIncludedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, HostToIncludedNetworkTable, HostToIncludedNetworkPrimaryKey...), - ) + step := newHostToIncludedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1035,7 +641,6 @@ func HasDependOnHostToHostDependency() predicate.Host { return predicate.Host(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DependOnHostToHostDependencyTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, DependOnHostToHostDependencyTable, DependOnHostToHostDependencyColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1045,11 +650,7 @@ func HasDependOnHostToHostDependency() predicate.Host { // HasDependOnHostToHostDependencyWith applies the HasEdge predicate on the "DependOnHostToHostDependency" edge with a given conditions (other predicates). func HasDependOnHostToHostDependencyWith(preds ...predicate.HostDependency) predicate.Host { return predicate.Host(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DependOnHostToHostDependencyInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, DependOnHostToHostDependencyTable, DependOnHostToHostDependencyColumn), - ) + step := newDependOnHostToHostDependencyStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1063,7 +664,6 @@ func HasDependByHostToHostDependency() predicate.Host { return predicate.Host(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DependByHostToHostDependencyTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, DependByHostToHostDependencyTable, DependByHostToHostDependencyColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1073,11 +673,7 @@ func HasDependByHostToHostDependency() predicate.Host { // HasDependByHostToHostDependencyWith applies the HasEdge predicate on the "DependByHostToHostDependency" edge with a given conditions (other predicates). func HasDependByHostToHostDependencyWith(preds ...predicate.HostDependency) predicate.Host { return predicate.Host(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DependByHostToHostDependencyInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, DependByHostToHostDependencyTable, DependByHostToHostDependencyColumn), - ) + step := newDependByHostToHostDependencyStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1088,32 +684,15 @@ func HasDependByHostToHostDependencyWith(preds ...predicate.HostDependency) pred // And groups predicates with the AND operator between them. func And(predicates ...predicate.Host) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Host(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Host) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Host(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Host) predicate.Host { - return predicate.Host(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Host(sql.NotPredicates(p)) } diff --git a/ent/host_create.go b/ent/host_create.go index 5c5d7127..b42ff64a 100755 --- a/ent/host_create.go +++ b/ent/host_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -25,9 +25,9 @@ type HostCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (hc *HostCreate) SetHclID(s string) *HostCreate { - hc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (hc *HostCreate) SetHCLID(s string) *HostCreate { + hc.mutation.SetHCLID(s) return hc } @@ -228,44 +228,8 @@ func (hc *HostCreate) Mutation() *HostMutation { // Save creates the Host in the database. func (hc *HostCreate) Save(ctx context.Context) (*Host, error) { - var ( - err error - node *Host - ) hc.defaults() - if len(hc.hooks) == 0 { - if err = hc.check(); err != nil { - return nil, err - } - node, err = hc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = hc.check(); err != nil { - return nil, err - } - hc.mutation = mutation - if node, err = hc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(hc.hooks) - 1; i >= 0; i-- { - if hc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, hc.sqlSave, hc.mutation, hc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -300,7 +264,7 @@ func (hc *HostCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
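Reviewer note: the field predicates and the And/Or/Not combinators above now just wrap entgo's sql.FieldEQ/FieldIn/AndPredicates helpers, so call sites keep the same shape. A minimal usage sketch under that assumption (the client and ctx values are placeholders, not part of this diff):

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/host"
)

// linuxEdgeHosts is a hypothetical helper showing the call-site shape of the
// regenerated predicates; only their internals changed in this hunk.
func linuxEdgeHosts(ctx context.Context, client *ent.Client) ([]*ent.Host, error) {
	return client.Host.Query().
		Where(
			host.OSEQ("linux"),
			host.Or(
				host.HostnameHasPrefix("web"),
				host.HostnameContains("proxy"),
			),
			host.LastOctetGTE(10),
		).
		All(ctx)
}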
func (hc *HostCreate) check() error { - if _, ok := hc.mutation.HclID(); !ok { + if _, ok := hc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Host.hcl_id"`)} } if _, ok := hc.mutation.Hostname(); !ok { @@ -343,10 +307,13 @@ func (hc *HostCreate) check() error { } func (hc *HostCreate) sqlSave(ctx context.Context) (*Host, error) { + if err := hc.check(); err != nil { + return nil, err + } _node, _spec := hc.createSpec() if err := sqlgraph.CreateNode(ctx, hc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -357,134 +324,74 @@ func (hc *HostCreate) sqlSave(ctx context.Context) (*Host, error) { return nil, err } } + hc.mutation.id = &_node.ID + hc.mutation.done = true return _node, nil } func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { var ( _node = &Host{config: hc.config} - _spec = &sqlgraph.CreateSpec{ - Table: host.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(host.Table, sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID)) ) if id, ok := hc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := hc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldHclID, - }) - _node.HclID = value + if value, ok := hc.mutation.HCLID(); ok { + _spec.SetField(host.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := hc.mutation.Hostname(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldHostname, - }) + _spec.SetField(host.FieldHostname, field.TypeString, value) _node.Hostname = value } if value, ok := hc.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldDescription, - }) + _spec.SetField(host.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := hc.mutation.OS(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldOS, - }) + _spec.SetField(host.FieldOS, field.TypeString, value) _node.OS = value } if value, ok := hc.mutation.LastOctet(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: host.FieldLastOctet, - }) + _spec.SetField(host.FieldLastOctet, field.TypeInt, value) _node.LastOctet = value } if value, ok := hc.mutation.InstanceSize(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldInstanceSize, - }) + _spec.SetField(host.FieldInstanceSize, field.TypeString, value) _node.InstanceSize = value } if value, ok := hc.mutation.AllowMACChanges(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: host.FieldAllowMACChanges, - }) + _spec.SetField(host.FieldAllowMACChanges, field.TypeBool, value) _node.AllowMACChanges = value } if value, ok := hc.mutation.ExposedTCPPorts(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldExposedTCPPorts, - }) + _spec.SetField(host.FieldExposedTCPPorts, field.TypeJSON, value) _node.ExposedTCPPorts = 
value } if value, ok := hc.mutation.ExposedUDPPorts(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldExposedUDPPorts, - }) + _spec.SetField(host.FieldExposedUDPPorts, field.TypeJSON, value) _node.ExposedUDPPorts = value } if value, ok := hc.mutation.OverridePassword(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldOverridePassword, - }) + _spec.SetField(host.FieldOverridePassword, field.TypeString, value) _node.OverridePassword = value } if value, ok := hc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldVars, - }) + _spec.SetField(host.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := hc.mutation.UserGroups(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldUserGroups, - }) + _spec.SetField(host.FieldUserGroups, field.TypeJSON, value) _node.UserGroups = value } if value, ok := hc.mutation.ProvisionSteps(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldProvisionSteps, - }) + _spec.SetField(host.FieldProvisionSteps, field.TypeJSON, value) _node.ProvisionSteps = value } if value, ok := hc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldTags, - }) + _spec.SetField(host.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := hc.mutation.HostToDiskIDs(); len(nodes) > 0 { @@ -495,10 +402,7 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { Columns: []string{host.HostToDiskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -514,10 +418,7 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -533,10 +434,7 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { Columns: []string{host.HostToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -553,10 +451,7 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -572,10 +467,7 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { 
@@ -591,10 +483,7 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -608,11 +497,15 @@ func (hc *HostCreate) createSpec() (*Host, *sqlgraph.CreateSpec) { // HostCreateBulk is the builder for creating many Host entities in bulk. type HostCreateBulk struct { config + err error builders []*HostCreate } // Save creates the Host entities in the database. func (hcb *HostCreateBulk) Save(ctx context.Context) ([]*Host, error) { + if hcb.err != nil { + return nil, hcb.err + } specs := make([]*sqlgraph.CreateSpec, len(hcb.builders)) nodes := make([]*Host, len(hcb.builders)) mutators := make([]Mutator, len(hcb.builders)) @@ -629,8 +522,8 @@ func (hcb *HostCreateBulk) Save(ctx context.Context) ([]*Host, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, hcb.builders[i+1].mutation) } else { @@ -638,7 +531,7 @@ func (hcb *HostCreateBulk) Save(ctx context.Context) ([]*Host, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, hcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/host_delete.go b/ent/host_delete.go index f156bd21..2d6f05de 100755 --- a/ent/host_delete.go +++ b/ent/host_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (hd *HostDelete) Where(ps ...predicate.Host) *HostDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (hd *HostDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(hd.hooks) == 0 { - affected, err = hd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - hd.mutation = mutation - affected, err = hd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(hd.hooks) - 1; i >= 0; i-- { - if hd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, hd.sqlExec, hd.mutation, hd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
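The HostCreate.Save path now funnels through withHooks instead of the inlined hook chain, and createSpec builds fields via sqlgraph.NewCreateSpec/SetField; callers are unaffected. A hedged sketch (imports as in the earlier sketch; SetHCLID appears in this hunk, the remaining setters are assumed to follow ent's generated Set<Field> pattern and only a few required fields are spelled out):

// createExampleHost is illustrative only.
func createExampleHost(ctx context.Context, client *ent.Client) (*ent.Host, error) {
	return client.Host.Create().
		SetHCLID("example-host").
		SetHostname("web01").
		SetOS("linux").
		SetLastOctet(10).
		SetInstanceSize("medium").
		Save(ctx) // registered hooks, if any, now wrap sqlSave via withHooks
}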
@@ -68,15 +40,7 @@ func (hd *HostDelete) ExecX(ctx context.Context) int { } func (hd *HostDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: host.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(host.Table, sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID)) if ps := hd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (hd *HostDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, hd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, hd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + hd.mutation.done = true + return affected, err } // HostDeleteOne is the builder for deleting a single Host entity. @@ -92,6 +61,12 @@ type HostDeleteOne struct { hd *HostDelete } +// Where appends a list predicates to the HostDelete builder. +func (hdo *HostDeleteOne) Where(ps ...predicate.Host) *HostDeleteOne { + hdo.hd.mutation.Where(ps...) + return hdo +} + // Exec executes the deletion query. func (hdo *HostDeleteOne) Exec(ctx context.Context) error { n, err := hdo.hd.Exec(ctx) @@ -107,5 +82,7 @@ func (hdo *HostDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (hdo *HostDeleteOne) ExecX(ctx context.Context) { - hdo.hd.ExecX(ctx) + if err := hdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/host_query.go b/ent/host_query.go index 2d066c08..6ef74c74 100755 --- a/ent/host_query.go +++ b/ent/host_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -25,20 +24,23 @@ import ( // HostQuery is the builder for querying Host entities. type HostQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Host - // eager-loading edges. - withHostToDisk *DiskQuery - withHostToUser *UserQuery - withHostToEnvironment *EnvironmentQuery - withHostToIncludedNetwork *IncludedNetworkQuery - withDependOnHostToHostDependency *HostDependencyQuery - withDependByHostToHostDependency *HostDependencyQuery - withFKs bool + ctx *QueryContext + order []host.OrderOption + inters []Interceptor + predicates []predicate.Host + withHostToDisk *DiskQuery + withHostToUser *UserQuery + withHostToEnvironment *EnvironmentQuery + withHostToIncludedNetwork *IncludedNetworkQuery + withDependOnHostToHostDependency *HostDependencyQuery + withDependByHostToHostDependency *HostDependencyQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Host) error + withNamedHostToUser map[string]*UserQuery + withNamedHostToIncludedNetwork map[string]*IncludedNetworkQuery + withNamedDependOnHostToHostDependency map[string]*HostDependencyQuery + withNamedDependByHostToHostDependency map[string]*HostDependencyQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -50,34 +52,34 @@ func (hq *HostQuery) Where(ps ...predicate.Host) *HostQuery { return hq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. 
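host_delete.go gains HostDeleteOne.Where plus a constraint-error wrap in sqlExec; the delete builders are otherwise driven as before. Illustrative sketch (imports as above plus github.com/google/uuid; DeleteOneID is the usual generated entry point and is assumed here, it is not part of this hunk):

// pruneHosts sketches both delete paths.
func pruneHosts(ctx context.Context, client *ent.Client, id uuid.UUID) error {
	// Bulk delete by predicate.
	if _, err := client.Host.Delete().
		Where(host.OSEQ("windows")).
		Exec(ctx); err != nil {
		return err
	}
	// Single delete, narrowed further with the newly added Where.
	return client.Host.DeleteOneID(id).
		Where(host.HostnameHasPrefix("tmp-")).
		Exec(ctx)
}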
func (hq *HostQuery) Limit(limit int) *HostQuery { - hq.limit = &limit + hq.ctx.Limit = &limit return hq } -// Offset adds an offset step to the query. +// Offset to start from. func (hq *HostQuery) Offset(offset int) *HostQuery { - hq.offset = &offset + hq.ctx.Offset = &offset return hq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (hq *HostQuery) Unique(unique bool) *HostQuery { - hq.unique = &unique + hq.ctx.Unique = &unique return hq } -// Order adds an order step to the query. -func (hq *HostQuery) Order(o ...OrderFunc) *HostQuery { +// Order specifies how the records should be ordered. +func (hq *HostQuery) Order(o ...host.OrderOption) *HostQuery { hq.order = append(hq.order, o...) return hq } // QueryHostToDisk chains the current query on the "HostToDisk" edge. func (hq *HostQuery) QueryHostToDisk() *DiskQuery { - query := &DiskQuery{config: hq.config} + query := (&DiskClient{config: hq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hq.prepareQuery(ctx); err != nil { return nil, err @@ -99,7 +101,7 @@ func (hq *HostQuery) QueryHostToDisk() *DiskQuery { // QueryHostToUser chains the current query on the "HostToUser" edge. func (hq *HostQuery) QueryHostToUser() *UserQuery { - query := &UserQuery{config: hq.config} + query := (&UserClient{config: hq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hq.prepareQuery(ctx); err != nil { return nil, err @@ -121,7 +123,7 @@ func (hq *HostQuery) QueryHostToUser() *UserQuery { // QueryHostToEnvironment chains the current query on the "HostToEnvironment" edge. func (hq *HostQuery) QueryHostToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: hq.config} + query := (&EnvironmentClient{config: hq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hq.prepareQuery(ctx); err != nil { return nil, err @@ -143,7 +145,7 @@ func (hq *HostQuery) QueryHostToEnvironment() *EnvironmentQuery { // QueryHostToIncludedNetwork chains the current query on the "HostToIncludedNetwork" edge. func (hq *HostQuery) QueryHostToIncludedNetwork() *IncludedNetworkQuery { - query := &IncludedNetworkQuery{config: hq.config} + query := (&IncludedNetworkClient{config: hq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hq.prepareQuery(ctx); err != nil { return nil, err @@ -165,7 +167,7 @@ func (hq *HostQuery) QueryHostToIncludedNetwork() *IncludedNetworkQuery { // QueryDependOnHostToHostDependency chains the current query on the "DependOnHostToHostDependency" edge. func (hq *HostQuery) QueryDependOnHostToHostDependency() *HostDependencyQuery { - query := &HostDependencyQuery{config: hq.config} + query := (&HostDependencyClient{config: hq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hq.prepareQuery(ctx); err != nil { return nil, err @@ -187,7 +189,7 @@ func (hq *HostQuery) QueryDependOnHostToHostDependency() *HostDependencyQuery { // QueryDependByHostToHostDependency chains the current query on the "DependByHostToHostDependency" edge. 
func (hq *HostQuery) QueryDependByHostToHostDependency() *HostDependencyQuery { - query := &HostDependencyQuery{config: hq.config} + query := (&HostDependencyClient{config: hq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hq.prepareQuery(ctx); err != nil { return nil, err @@ -210,7 +212,7 @@ func (hq *HostQuery) QueryDependByHostToHostDependency() *HostDependencyQuery { // First returns the first Host entity from the query. // Returns a *NotFoundError when no Host was found. func (hq *HostQuery) First(ctx context.Context) (*Host, error) { - nodes, err := hq.Limit(1).All(ctx) + nodes, err := hq.Limit(1).All(setContextOp(ctx, hq.ctx, "First")) if err != nil { return nil, err } @@ -233,7 +235,7 @@ func (hq *HostQuery) FirstX(ctx context.Context) *Host { // Returns a *NotFoundError when no Host ID was found. func (hq *HostQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = hq.Limit(1).IDs(ctx); err != nil { + if ids, err = hq.Limit(1).IDs(setContextOp(ctx, hq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -256,7 +258,7 @@ func (hq *HostQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Host entity is found. // Returns a *NotFoundError when no Host entities are found. func (hq *HostQuery) Only(ctx context.Context) (*Host, error) { - nodes, err := hq.Limit(2).All(ctx) + nodes, err := hq.Limit(2).All(setContextOp(ctx, hq.ctx, "Only")) if err != nil { return nil, err } @@ -284,7 +286,7 @@ func (hq *HostQuery) OnlyX(ctx context.Context) *Host { // Returns a *NotFoundError when no entities are found. func (hq *HostQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = hq.Limit(2).IDs(ctx); err != nil { + if ids, err = hq.Limit(2).IDs(setContextOp(ctx, hq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -309,10 +311,12 @@ func (hq *HostQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Hosts. func (hq *HostQuery) All(ctx context.Context) ([]*Host, error) { + ctx = setContextOp(ctx, hq.ctx, "All") if err := hq.prepareQuery(ctx); err != nil { return nil, err } - return hq.sqlAll(ctx) + qr := querierAll[[]*Host, *HostQuery]() + return withInterceptors[[]*Host](ctx, hq, qr, hq.inters) } // AllX is like All, but panics if an error occurs. @@ -325,9 +329,12 @@ func (hq *HostQuery) AllX(ctx context.Context) []*Host { } // IDs executes the query and returns a list of Host IDs. -func (hq *HostQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := hq.Select(host.FieldID).Scan(ctx, &ids); err != nil { +func (hq *HostQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if hq.ctx.Unique == nil && hq.path != nil { + hq.Unique(true) + } + ctx = setContextOp(ctx, hq.ctx, "IDs") + if err = hq.Select(host.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -344,10 +351,11 @@ func (hq *HostQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (hq *HostQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, hq.ctx, "Count") if err := hq.prepareQuery(ctx); err != nil { return 0, err } - return hq.sqlCount(ctx) + return withInterceptors[int](ctx, hq, querierCount[*HostQuery](), hq.inters) } // CountX is like Count, but panics if an error occurs. 
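The terminators (First, Only, All, IDs, Count) now tag the context via setContextOp but keep their error contract, and IsNotFound still distinguishes the not-found case, which the rewritten Exist in the next hunk relies on. A small sketch using predicates from the hunks above:

// findHostByName is a hypothetical lookup helper; HostnameEqualFold is one of
// the regenerated predicates and ent.IsNotFound is unchanged.
func findHostByName(ctx context.Context, client *ent.Client, name string) (*ent.Host, error) {
	h, err := client.Host.Query().
		Where(host.HostnameEqualFold(name)).
		Only(ctx)
	switch {
	case ent.IsNotFound(err):
		return nil, nil // treat "no such host" as a nil result here
	case err != nil:
		return nil, err
	}
	return h, nil
}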
@@ -361,10 +369,15 @@ func (hq *HostQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (hq *HostQuery) Exist(ctx context.Context) (bool, error) { - if err := hq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, hq.ctx, "Exist") + switch _, err := hq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return hq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -384,9 +397,9 @@ func (hq *HostQuery) Clone() *HostQuery { } return &HostQuery{ config: hq.config, - limit: hq.limit, - offset: hq.offset, - order: append([]OrderFunc{}, hq.order...), + ctx: hq.ctx.Clone(), + order: append([]host.OrderOption{}, hq.order...), + inters: append([]Interceptor{}, hq.inters...), predicates: append([]predicate.Host{}, hq.predicates...), withHostToDisk: hq.withHostToDisk.Clone(), withHostToUser: hq.withHostToUser.Clone(), @@ -395,16 +408,15 @@ func (hq *HostQuery) Clone() *HostQuery { withDependOnHostToHostDependency: hq.withDependOnHostToHostDependency.Clone(), withDependByHostToHostDependency: hq.withDependByHostToHostDependency.Clone(), // clone intermediate query. - sql: hq.sql.Clone(), - path: hq.path, - unique: hq.unique, + sql: hq.sql.Clone(), + path: hq.path, } } // WithHostToDisk tells the query-builder to eager-load the nodes that are connected to // the "HostToDisk" edge. The optional arguments are used to configure the query builder of the edge. func (hq *HostQuery) WithHostToDisk(opts ...func(*DiskQuery)) *HostQuery { - query := &DiskQuery{config: hq.config} + query := (&DiskClient{config: hq.config}).Query() for _, opt := range opts { opt(query) } @@ -415,7 +427,7 @@ func (hq *HostQuery) WithHostToDisk(opts ...func(*DiskQuery)) *HostQuery { // WithHostToUser tells the query-builder to eager-load the nodes that are connected to // the "HostToUser" edge. The optional arguments are used to configure the query builder of the edge. func (hq *HostQuery) WithHostToUser(opts ...func(*UserQuery)) *HostQuery { - query := &UserQuery{config: hq.config} + query := (&UserClient{config: hq.config}).Query() for _, opt := range opts { opt(query) } @@ -426,7 +438,7 @@ func (hq *HostQuery) WithHostToUser(opts ...func(*UserQuery)) *HostQuery { // WithHostToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "HostToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (hq *HostQuery) WithHostToEnvironment(opts ...func(*EnvironmentQuery)) *HostQuery { - query := &EnvironmentQuery{config: hq.config} + query := (&EnvironmentClient{config: hq.config}).Query() for _, opt := range opts { opt(query) } @@ -437,7 +449,7 @@ func (hq *HostQuery) WithHostToEnvironment(opts ...func(*EnvironmentQuery)) *Hos // WithHostToIncludedNetwork tells the query-builder to eager-load the nodes that are connected to // the "HostToIncludedNetwork" edge. The optional arguments are used to configure the query builder of the edge. 
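The With* builders now construct their edge queries through the typed clients ((&UserClient{config: ...}).Query() and friends) rather than bare query structs; chaining at call sites is unchanged. Sketch, with the nested-query option assumed to behave as in stock ent:

// hostsWithEdges eager-loads a few edges; the callback type matches the
// opts ...func(*HostDependencyQuery) signature shown in these hunks.
func hostsWithEdges(ctx context.Context, client *ent.Client) ([]*ent.Host, error) {
	return client.Host.Query().
		WithHostToUser().
		WithHostToEnvironment().
		WithDependOnHostToHostDependency(func(q *ent.HostDependencyQuery) {
			q.Limit(5) // assumed: every generated query exposes Limit
		}).
		All(ctx)
}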
func (hq *HostQuery) WithHostToIncludedNetwork(opts ...func(*IncludedNetworkQuery)) *HostQuery { - query := &IncludedNetworkQuery{config: hq.config} + query := (&IncludedNetworkClient{config: hq.config}).Query() for _, opt := range opts { opt(query) } @@ -448,7 +460,7 @@ func (hq *HostQuery) WithHostToIncludedNetwork(opts ...func(*IncludedNetworkQuer // WithDependOnHostToHostDependency tells the query-builder to eager-load the nodes that are connected to // the "DependOnHostToHostDependency" edge. The optional arguments are used to configure the query builder of the edge. func (hq *HostQuery) WithDependOnHostToHostDependency(opts ...func(*HostDependencyQuery)) *HostQuery { - query := &HostDependencyQuery{config: hq.config} + query := (&HostDependencyClient{config: hq.config}).Query() for _, opt := range opts { opt(query) } @@ -459,7 +471,7 @@ func (hq *HostQuery) WithDependOnHostToHostDependency(opts ...func(*HostDependen // WithDependByHostToHostDependency tells the query-builder to eager-load the nodes that are connected to // the "DependByHostToHostDependency" edge. The optional arguments are used to configure the query builder of the edge. func (hq *HostQuery) WithDependByHostToHostDependency(opts ...func(*HostDependencyQuery)) *HostQuery { - query := &HostDependencyQuery{config: hq.config} + query := (&HostDependencyClient{config: hq.config}).Query() for _, opt := range opts { opt(query) } @@ -473,25 +485,21 @@ func (hq *HostQuery) WithDependByHostToHostDependency(opts ...func(*HostDependen // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Host.Query(). -// GroupBy(host.FieldHclID). +// GroupBy(host.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (hq *HostQuery) GroupBy(field string, fields ...string) *HostGroupBy { - group := &HostGroupBy{config: hq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := hq.prepareQuery(ctx); err != nil { - return nil, err - } - return hq.sqlQuery(ctx), nil - } - return group + hq.ctx.Fields = append([]string{field}, fields...) + grbuild := &HostGroupBy{build: hq} + grbuild.flds = &hq.ctx.Fields + grbuild.label = host.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -500,20 +508,37 @@ func (hq *HostQuery) GroupBy(field string, fields ...string) *HostGroupBy { // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Host.Query(). -// Select(host.FieldHclID). +// Select(host.FieldHCLID). // Scan(ctx, &v) -// func (hq *HostQuery) Select(fields ...string) *HostSelect { - hq.fields = append(hq.fields, fields...) - return &HostSelect{HostQuery: hq} + hq.ctx.Fields = append(hq.ctx.Fields, fields...) + sbuild := &HostSelect{HostQuery: hq} + sbuild.label = host.Label + sbuild.flds, sbuild.scan = &hq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a HostSelect configured with the given aggregations. +func (hq *HostQuery) Aggregate(fns ...AggregateFunc) *HostSelect { + return hq.Select().Aggregate(fns...) 
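GroupBy and Select now stash their fields on the shared QueryContext, and an Aggregate shortcut is added; the doc-comment example in this hunk (FieldHCLID with ent.Count) still holds. A worked variant grouping on instance_size, following the same Scan-into-struct pattern:

// hostsPerSize mirrors the generated GroupBy doc example; the anonymous
// struct tags follow the column names used by Scan.
func hostsPerSize(ctx context.Context, client *ent.Client) (map[string]int, error) {
	var rows []struct {
		InstanceSize string `json:"instance_size"`
		Count        int    `json:"count"`
	}
	if err := client.Host.Query().
		GroupBy(host.FieldInstanceSize).
		Aggregate(ent.Count()).
		Scan(ctx, &rows); err != nil {
		return nil, err
	}
	out := make(map[string]int, len(rows))
	for _, r := range rows {
		out[r.InstanceSize] = r.Count
	}
	return out, nil
}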
} func (hq *HostQuery) prepareQuery(ctx context.Context) error { - for _, f := range hq.fields { + for _, inter := range hq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, hq); err != nil { + return err + } + } + } + for _, f := range hq.ctx.Fields { if !host.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -528,7 +553,7 @@ func (hq *HostQuery) prepareQuery(ctx context.Context) error { return nil } -func (hq *HostQuery) sqlAll(ctx context.Context) ([]*Host, error) { +func (hq *HostQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Host, error) { var ( nodes = []*Host{} withFKs = hq.withFKs @@ -548,272 +573,345 @@ func (hq *HostQuery) sqlAll(ctx context.Context) ([]*Host, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, host.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Host).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Host{config: hq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(hq.modifiers) > 0 { + _spec.Modifiers = hq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, hq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := hq.withHostToDisk; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Host) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + if err := hq.loadHostToDisk(ctx, query, nodes, nil, + func(n *Host, e *Disk) { n.Edges.HostToDisk = e }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.Disk(func(s *sql.Selector) { - s.Where(sql.InValues(host.HostToDiskColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := hq.withHostToUser; query != nil { + if err := hq.loadHostToUser(ctx, query, nodes, + func(n *Host) { n.Edges.HostToUser = []*User{} }, + func(n *Host, e *User) { n.Edges.HostToUser = append(n.Edges.HostToUser, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.host_host_to_disk - if fk == nil { - return nil, fmt.Errorf(`foreign-key "host_host_to_disk" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_host_to_disk" returned %v for node %v`, *fk, n.ID) - } - node.Edges.HostToDisk = n + } + if query := hq.withHostToEnvironment; query != nil { + if err := hq.loadHostToEnvironment(ctx, query, nodes, nil, + func(n *Host, e *Environment) { n.Edges.HostToEnvironment = e }); err != nil { + return nil, err } } - - if query := hq.withHostToUser; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Host) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.HostToUser = []*User{} + if query := hq.withHostToIncludedNetwork; query != nil { + 
if err := hq.loadHostToIncludedNetwork(ctx, query, nodes, + func(n *Host) { n.Edges.HostToIncludedNetwork = []*IncludedNetwork{} }, + func(n *Host, e *IncludedNetwork) { + n.Edges.HostToIncludedNetwork = append(n.Edges.HostToIncludedNetwork, e) + }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.User(func(s *sql.Selector) { - s.Where(sql.InValues(host.HostToUserColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := hq.withDependOnHostToHostDependency; query != nil { + if err := hq.loadDependOnHostToHostDependency(ctx, query, nodes, + func(n *Host) { n.Edges.DependOnHostToHostDependency = []*HostDependency{} }, + func(n *Host, e *HostDependency) { + n.Edges.DependOnHostToHostDependency = append(n.Edges.DependOnHostToHostDependency, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.host_host_to_user - if fk == nil { - return nil, fmt.Errorf(`foreign-key "host_host_to_user" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_host_to_user" returned %v for node %v`, *fk, n.ID) - } - node.Edges.HostToUser = append(node.Edges.HostToUser, n) + } + if query := hq.withDependByHostToHostDependency; query != nil { + if err := hq.loadDependByHostToHostDependency(ctx, query, nodes, + func(n *Host) { n.Edges.DependByHostToHostDependency = []*HostDependency{} }, + func(n *Host, e *HostDependency) { + n.Edges.DependByHostToHostDependency = append(n.Edges.DependByHostToHostDependency, e) + }); err != nil { + return nil, err } } - - if query := hq.withHostToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Host) - for i := range nodes { - if nodes[i].environment_environment_to_host == nil { - continue - } - fk := *nodes[i].environment_environment_to_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + for name, query := range hq.withNamedHostToUser { + if err := hq.loadHostToUser(ctx, query, nodes, + func(n *Host) { n.appendNamedHostToUser(name) }, + func(n *Host, e *User) { n.appendNamedHostToUser(name, e) }); err != nil { + return nil, err } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range hq.withNamedHostToIncludedNetwork { + if err := hq.loadHostToIncludedNetwork(ctx, query, nodes, + func(n *Host) { n.appendNamedHostToIncludedNetwork(name) }, + func(n *Host, e *IncludedNetwork) { n.appendNamedHostToIncludedNetwork(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.HostToEnvironment = n - } + } + for name, query := range hq.withNamedDependOnHostToHostDependency { + if err := hq.loadDependOnHostToHostDependency(ctx, query, nodes, + func(n *Host) { n.appendNamedDependOnHostToHostDependency(name) }, + func(n *Host, e *HostDependency) { n.appendNamedDependOnHostToHostDependency(name, e) }); err != nil { + return nil, err + } + } + for name, query := range hq.withNamedDependByHostToHostDependency { + if err := hq.loadDependByHostToHostDependency(ctx, query, nodes, + func(n *Host) { n.appendNamedDependByHostToHostDependency(name) }, + func(n *Host, e *HostDependency) { n.appendNamedDependByHostToHostDependency(name, e) }); err != nil { + 
return nil, err } } + for i := range hq.loadTotal { + if err := hq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := hq.withHostToIncludedNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Host, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.HostToIncludedNetwork = []*IncludedNetwork{} +func (hq *HostQuery) loadHostToDisk(ctx context.Context, query *DiskQuery, nodes []*Host, init func(*Host), assign func(*Host, *Disk)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Host) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Disk(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(host.HostToDiskColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.host_host_to_disk + if fk == nil { + return fmt.Errorf(`foreign-key "host_host_to_disk" is nil for node %v`, n.ID) } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Host) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: host.HostToIncludedNetworkTable, - Columns: host.HostToIncludedNetworkPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(host.HostToIncludedNetworkPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "host_host_to_disk" returned %v for node %v`, *fk, n.ID) } - if err := sqlgraph.QueryEdges(ctx, hq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "HostToIncludedNetwork": %w`, err) + assign(node, n) + } + return nil +} +func (hq *HostQuery) loadHostToUser(ctx context.Context, query *UserQuery, nodes []*Host, init func(*Host), assign func(*Host, *User)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Host) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - query.Where(includednetwork.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + query.withFKs = true + query.Where(predicate.User(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(host.HostToUserColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.host_host_to_user + if fk == nil { + return fmt.Errorf(`foreign-key "host_host_to_user" is nil for node %v`, n.ID) } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "HostToIncludedNetwork" node returned %v`, n.ID) - } - for i := range nodes { - 
nodes[i].Edges.HostToIncludedNetwork = append(nodes[i].Edges.HostToIncludedNetwork, n) - } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "host_host_to_user" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - if query := hq.withDependOnHostToHostDependency; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Host) + return nil +} +func (hq *HostQuery) loadHostToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Host, init func(*Host), assign func(*Host, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Host) + for i := range nodes { + if nodes[i].environment_environment_to_host == nil { + continue + } + fk := *nodes[i].environment_environment_to_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_host" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.DependOnHostToHostDependency = []*HostDependency{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.InValues(host.DependOnHostToHostDependencyColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (hq *HostQuery) loadHostToIncludedNetwork(ctx context.Context, query *IncludedNetworkQuery, nodes []*Host, init func(*Host), assign func(*Host, *IncludedNetwork)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Host) + nids := make(map[uuid.UUID]map[*Host]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) } - for _, n := range neighbors { - fk := n.host_dependency_host_dependency_to_depend_on_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "host_dependency_host_dependency_to_depend_on_host" is nil for node %v`, n.ID) + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(host.HostToIncludedNetworkTable) + s.Join(joinT).On(s.C(includednetwork.FieldID), joinT.C(host.HostToIncludedNetworkPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(host.HostToIncludedNetworkPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(host.HostToIncludedNetworkPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_depend_on_host" returned %v for node %v`, *fk, n.ID) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Host]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } - node.Edges.DependOnHostToHostDependency = append(node.Edges.DependOnHostToHostDependency, n) + }) + }) + neighbors, err := withInterceptors[[]*IncludedNetwork](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "HostToIncludedNetwork" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - if query := hq.withDependByHostToHostDependency; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Host) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.DependByHostToHostDependency = []*HostDependency{} + return nil +} +func (hq *HostQuery) loadDependOnHostToHostDependency(ctx context.Context, query *HostDependencyQuery, nodes []*Host, init func(*Host), assign func(*Host, *HostDependency)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Host) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - query.withFKs = true - query.Where(predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.InValues(host.DependByHostToHostDependencyColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + query.withFKs = true + query.Where(predicate.HostDependency(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(host.DependOnHostToHostDependencyColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.host_dependency_host_dependency_to_depend_on_host + if fk == nil { + return fmt.Errorf(`foreign-key "host_dependency_host_dependency_to_depend_on_host" is nil for node %v`, n.ID) } - for _, n := range neighbors { - fk := n.host_dependency_host_dependency_to_depend_by_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "host_dependency_host_dependency_to_depend_by_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_depend_by_host" returned %v for node %v`, *fk, n.ID) - } - node.Edges.DependByHostToHostDependency = append(node.Edges.DependByHostToHostDependency, n) + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "host_dependency_host_dependency_to_depend_on_host" returned %v for node %v`, *fk, n.ID) } + assign(node, n) 
} - - return nodes, nil + return nil +} +func (hq *HostQuery) loadDependByHostToHostDependency(ctx context.Context, query *HostDependencyQuery, nodes []*Host, init func(*Host), assign func(*Host, *HostDependency)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Host) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.HostDependency(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(host.DependByHostToHostDependencyColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.host_dependency_host_dependency_to_depend_by_host + if fk == nil { + return fmt.Errorf(`foreign-key "host_dependency_host_dependency_to_depend_by_host" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "host_dependency_host_dependency_to_depend_by_host" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } func (hq *HostQuery) sqlCount(ctx context.Context) (int, error) { _spec := hq.querySpec() - _spec.Node.Columns = hq.fields - if len(hq.fields) > 0 { - _spec.Unique = hq.unique != nil && *hq.unique + if len(hq.modifiers) > 0 { + _spec.Modifiers = hq.modifiers } - return sqlgraph.CountNodes(ctx, hq.driver, _spec) -} - -func (hq *HostQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := hq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = hq.ctx.Fields + if len(hq.ctx.Fields) > 0 { + _spec.Unique = hq.ctx.Unique != nil && *hq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, hq.driver, _spec) } func (hq *HostQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: host.Table, - Columns: host.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, - }, - From: hq.sql, - Unique: true, - } - if unique := hq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(host.Table, host.Columns, sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID)) + _spec.From = hq.sql + if unique := hq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if hq.path != nil { + _spec.Unique = true } - if fields := hq.fields; len(fields) > 0 { + if fields := hq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, host.FieldID) for i := range fields { @@ -829,10 +927,10 @@ func (hq *HostQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := hq.limit; limit != nil { + if limit := hq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := hq.offset; offset != nil { + if offset := hq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := hq.order; len(ps) > 0 { @@ -848,7 +946,7 @@ func (hq *HostQuery) querySpec() *sqlgraph.QuerySpec { func (hq *HostQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(hq.driver.Dialect()) t1 := builder.Table(host.Table) - columns := hq.fields + columns := hq.ctx.Fields if len(columns) == 0 { columns = host.Columns } @@ -857,7 +955,7 @@ func (hq *HostQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = hq.sql selector.Select(selector.Columns(columns...)...) 
} - if hq.unique != nil && *hq.unique { + if hq.ctx.Unique != nil && *hq.ctx.Unique { selector.Distinct() } for _, p := range hq.predicates { @@ -866,498 +964,156 @@ func (hq *HostQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range hq.order { p(selector) } - if offset := hq.offset; offset != nil { + if offset := hq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := hq.limit; limit != nil { + if limit := hq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// HostGroupBy is the group-by builder for Host entities. -type HostGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (hgb *HostGroupBy) Aggregate(fns ...AggregateFunc) *HostGroupBy { - hgb.fns = append(hgb.fns, fns...) - return hgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (hgb *HostGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := hgb.path(ctx) - if err != nil { - return err - } - hgb.sql = query - return hgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (hgb *HostGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := hgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(hgb.fields) > 1 { - return nil, errors.New("ent: HostGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := hgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (hgb *HostGroupBy) StringsX(ctx context.Context) []string { - v, err := hgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = hgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (hgb *HostGroupBy) StringX(ctx context.Context) string { - v, err := hgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(hgb.fields) > 1 { - return nil, errors.New("ent: HostGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := hgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedHostToUser tells the query-builder to eager-load the nodes that are connected to the "HostToUser" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (hq *HostQuery) WithNamedHostToUser(name string, opts ...func(*UserQuery)) *HostQuery { + query := (&UserClient{config: hq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (hgb *HostGroupBy) IntsX(ctx context.Context) []int { - v, err := hgb.Ints(ctx) - if err != nil { - panic(err) + if hq.withNamedHostToUser == nil { + hq.withNamedHostToUser = make(map[string]*UserQuery) } - return v + hq.withNamedHostToUser[name] = query + return hq } -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = hgb.Ints(ctx); err != nil { - return +// WithNamedHostToIncludedNetwork tells the query-builder to eager-load the nodes that are connected to the "HostToIncludedNetwork" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (hq *HostQuery) WithNamedHostToIncludedNetwork(name string, opts ...func(*IncludedNetworkQuery)) *HostQuery { + query := (&IncludedNetworkClient{config: hq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostGroupBy.Ints returned %d results when one was expected", len(v)) + if hq.withNamedHostToIncludedNetwork == nil { + hq.withNamedHostToIncludedNetwork = make(map[string]*IncludedNetworkQuery) } - return + hq.withNamedHostToIncludedNetwork[name] = query + return hq } -// IntX is like Int, but panics if an error occurs. -func (hgb *HostGroupBy) IntX(ctx context.Context) int { - v, err := hgb.Int(ctx) - if err != nil { - panic(err) +// WithNamedDependOnHostToHostDependency tells the query-builder to eager-load the nodes that are connected to the "DependOnHostToHostDependency" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (hq *HostQuery) WithNamedDependOnHostToHostDependency(name string, opts ...func(*HostDependencyQuery)) *HostQuery { + query := (&HostDependencyClient{config: hq.config}).Query() + for _, opt := range opts { + opt(query) } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(hgb.fields) > 1 { - return nil, errors.New("ent: HostGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := hgb.Scan(ctx, &v); err != nil { - return nil, err + if hq.withNamedDependOnHostToHostDependency == nil { + hq.withNamedDependOnHostToHostDependency = make(map[string]*HostDependencyQuery) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (hgb *HostGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := hgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v + hq.withNamedDependOnHostToHostDependency[name] = query + return hq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (hgb *HostGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = hgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedDependByHostToHostDependency tells the query-builder to eager-load the nodes that are connected to the "DependByHostToHostDependency" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (hq *HostQuery) WithNamedDependByHostToHostDependency(name string, opts ...func(*HostDependencyQuery)) *HostQuery { + query := (&HostDependencyClient{config: hq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (hgb *HostGroupBy) Float64X(ctx context.Context) float64 { - v, err := hgb.Float64(ctx) - if err != nil { - panic(err) + if hq.withNamedDependByHostToHostDependency == nil { + hq.withNamedDependByHostToHostDependency = make(map[string]*HostDependencyQuery) } - return v + hq.withNamedDependByHostToHostDependency[name] = query + return hq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(hgb.fields) > 1 { - return nil, errors.New("ent: HostGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := hgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// HostGroupBy is the group-by builder for Host entities. +type HostGroupBy struct { + selector + build *HostQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (hgb *HostGroupBy) BoolsX(ctx context.Context) []bool { - v, err := hgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (hgb *HostGroupBy) Aggregate(fns ...AggregateFunc) *HostGroupBy { + hgb.fns = append(hgb.fns, fns...) + return hgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (hgb *HostGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = hgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (hgb *HostGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, hgb.build.ctx, "GroupBy") + if err := hgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*HostQuery, *HostGroupBy](ctx, hgb.build, hgb, hgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. 
-func (hgb *HostGroupBy) BoolX(ctx context.Context) bool { - v, err := hgb.Bool(ctx) - if err != nil { - panic(err) +func (hgb *HostGroupBy) sqlScan(ctx context.Context, root *HostQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(hgb.fns)) + for _, fn := range hgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (hgb *HostGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range hgb.fields { - if !host.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*hgb.flds)+len(hgb.fns)) + for _, f := range *hgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := hgb.sqlQuery() + selector.GroupBy(selector.Columns(*hgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := hgb.driver.Query(ctx, query, args, rows); err != nil { + if err := hgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (hgb *HostGroupBy) sqlQuery() *sql.Selector { - selector := hgb.sql.Select() - aggregation := make([]string, 0, len(hgb.fns)) - for _, fn := range hgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(hgb.fields)+len(hgb.fns)) - for _, f := range hgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(hgb.fields...)...) -} - // HostSelect is the builder for selecting fields of Host entities. type HostSelect struct { *HostQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (hs *HostSelect) Aggregate(fns ...AggregateFunc) *HostSelect { + hs.fns = append(hs.fns, fns...) + return hs } // Scan applies the selector query and scans the result into the given value. -func (hs *HostSelect) Scan(ctx context.Context, v interface{}) error { +func (hs *HostSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, hs.ctx, "Select") if err := hs.prepareQuery(ctx); err != nil { return err } - hs.sql = hs.HostQuery.sqlQuery(ctx) - return hs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (hs *HostSelect) ScanX(ctx context.Context, v interface{}) { - if err := hs.Scan(ctx, v); err != nil { - panic(err) - } + return scanWithInterceptors[*HostQuery, *HostSelect](ctx, hs.HostQuery, hs, hs.inters, v) } -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) Strings(ctx context.Context) ([]string, error) { - if len(hs.fields) > 1 { - return nil, errors.New("ent: HostSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := hs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (hs *HostSelect) StringsX(ctx context.Context) []string { - v, err := hs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = hs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (hs *HostSelect) StringX(ctx context.Context) string { - v, err := hs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) Ints(ctx context.Context) ([]int, error) { - if len(hs.fields) > 1 { - return nil, errors.New("ent: HostSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := hs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (hs *HostSelect) IntsX(ctx context.Context) []int { - v, err := hs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = hs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (hs *HostSelect) IntX(ctx context.Context) int { - v, err := hs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(hs.fields) > 1 { - return nil, errors.New("ent: HostSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := hs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (hs *HostSelect) Float64sX(ctx context.Context) []float64 { - v, err := hs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = hs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (hs *HostSelect) Float64X(ctx context.Context) float64 { - v, err := hs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. 
-func (hs *HostSelect) Bools(ctx context.Context) ([]bool, error) { - if len(hs.fields) > 1 { - return nil, errors.New("ent: HostSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := hs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (hs *HostSelect) BoolsX(ctx context.Context) []bool { - v, err := hs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (hs *HostSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = hs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{host.Label} - default: - err = fmt.Errorf("ent: HostSelect.Bools returned %d results when one was expected", len(v)) +func (hs *HostSelect) sqlScan(ctx context.Context, root *HostQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(hs.fns)) + for _, fn := range hs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (hs *HostSelect) BoolX(ctx context.Context) bool { - v, err := hs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*hs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (hs *HostSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := hs.sql.Query() + query, args := selector.Query() if err := hs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/host_update.go b/ent/host_update.go index aad94e2f..d908e684 100755 --- a/ent/host_update.go +++ b/ent/host_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/disk" "github.com/gen0cide/laforge/ent/environment" @@ -33,9 +34,17 @@ func (hu *HostUpdate) Where(ps ...predicate.Host) *HostUpdate { return hu } -// SetHclID sets the "hcl_id" field. -func (hu *HostUpdate) SetHclID(s string) *HostUpdate { - hu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (hu *HostUpdate) SetHCLID(s string) *HostUpdate { + hu.mutation.SetHCLID(s) + return hu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (hu *HostUpdate) SetNillableHCLID(s *string) *HostUpdate { + if s != nil { + hu.SetHCLID(*s) + } return hu } @@ -45,18 +54,42 @@ func (hu *HostUpdate) SetHostname(s string) *HostUpdate { return hu } +// SetNillableHostname sets the "hostname" field if the given value is not nil. +func (hu *HostUpdate) SetNillableHostname(s *string) *HostUpdate { + if s != nil { + hu.SetHostname(*s) + } + return hu +} + // SetDescription sets the "description" field. func (hu *HostUpdate) SetDescription(s string) *HostUpdate { hu.mutation.SetDescription(s) return hu } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (hu *HostUpdate) SetNillableDescription(s *string) *HostUpdate { + if s != nil { + hu.SetDescription(*s) + } + return hu +} + // SetOS sets the "OS" field. 
func (hu *HostUpdate) SetOS(s string) *HostUpdate { hu.mutation.SetOS(s) return hu } +// SetNillableOS sets the "OS" field if the given value is not nil. +func (hu *HostUpdate) SetNillableOS(s *string) *HostUpdate { + if s != nil { + hu.SetOS(*s) + } + return hu +} + // SetLastOctet sets the "last_octet" field. func (hu *HostUpdate) SetLastOctet(i int) *HostUpdate { hu.mutation.ResetLastOctet() @@ -64,6 +97,14 @@ func (hu *HostUpdate) SetLastOctet(i int) *HostUpdate { return hu } +// SetNillableLastOctet sets the "last_octet" field if the given value is not nil. +func (hu *HostUpdate) SetNillableLastOctet(i *int) *HostUpdate { + if i != nil { + hu.SetLastOctet(*i) + } + return hu +} + // AddLastOctet adds i to the "last_octet" field. func (hu *HostUpdate) AddLastOctet(i int) *HostUpdate { hu.mutation.AddLastOctet(i) @@ -76,30 +117,66 @@ func (hu *HostUpdate) SetInstanceSize(s string) *HostUpdate { return hu } +// SetNillableInstanceSize sets the "instance_size" field if the given value is not nil. +func (hu *HostUpdate) SetNillableInstanceSize(s *string) *HostUpdate { + if s != nil { + hu.SetInstanceSize(*s) + } + return hu +} + // SetAllowMACChanges sets the "allow_mac_changes" field. func (hu *HostUpdate) SetAllowMACChanges(b bool) *HostUpdate { hu.mutation.SetAllowMACChanges(b) return hu } +// SetNillableAllowMACChanges sets the "allow_mac_changes" field if the given value is not nil. +func (hu *HostUpdate) SetNillableAllowMACChanges(b *bool) *HostUpdate { + if b != nil { + hu.SetAllowMACChanges(*b) + } + return hu +} + // SetExposedTCPPorts sets the "exposed_tcp_ports" field. func (hu *HostUpdate) SetExposedTCPPorts(s []string) *HostUpdate { hu.mutation.SetExposedTCPPorts(s) return hu } +// AppendExposedTCPPorts appends s to the "exposed_tcp_ports" field. +func (hu *HostUpdate) AppendExposedTCPPorts(s []string) *HostUpdate { + hu.mutation.AppendExposedTCPPorts(s) + return hu +} + // SetExposedUDPPorts sets the "exposed_udp_ports" field. func (hu *HostUpdate) SetExposedUDPPorts(s []string) *HostUpdate { hu.mutation.SetExposedUDPPorts(s) return hu } +// AppendExposedUDPPorts appends s to the "exposed_udp_ports" field. +func (hu *HostUpdate) AppendExposedUDPPorts(s []string) *HostUpdate { + hu.mutation.AppendExposedUDPPorts(s) + return hu +} + // SetOverridePassword sets the "override_password" field. func (hu *HostUpdate) SetOverridePassword(s string) *HostUpdate { hu.mutation.SetOverridePassword(s) return hu } +// SetNillableOverridePassword sets the "override_password" field if the given value is not nil. +func (hu *HostUpdate) SetNillableOverridePassword(s *string) *HostUpdate { + if s != nil { + hu.SetOverridePassword(*s) + } + return hu +} + // SetVars sets the "vars" field. func (hu *HostUpdate) SetVars(m map[string]string) *HostUpdate { hu.mutation.SetVars(m) @@ -112,12 +189,24 @@ func (hu *HostUpdate) SetUserGroups(s []string) *HostUpdate { return hu } +// AppendUserGroups appends s to the "user_groups" field. +func (hu *HostUpdate) AppendUserGroups(s []string) *HostUpdate { + hu.mutation.AppendUserGroups(s) + return hu +} + // SetProvisionSteps sets the "provision_steps" field. func (hu *HostUpdate) SetProvisionSteps(s []string) *HostUpdate { hu.mutation.SetProvisionSteps(s) return hu } +// AppendProvisionSteps appends s to the "provision_steps" field. +func (hu *HostUpdate) AppendProvisionSteps(s []string) *HostUpdate { + hu.mutation.AppendProvisionSteps(s) + return hu +} + // ClearProvisionSteps clears the value of the "provision_steps" field. 
func (hu *HostUpdate) ClearProvisionSteps() *HostUpdate { hu.mutation.ClearProvisionSteps() @@ -331,34 +420,7 @@ func (hu *HostUpdate) RemoveDependByHostToHostDependency(h ...*HostDependency) * // Save executes the query and returns the number of nodes affected by the update operation. func (hu *HostUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(hu.hooks) == 0 { - affected, err = hu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - hu.mutation = mutation - affected, err = hu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(hu.hooks) - 1; i >= 0; i-- { - if hu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, hu.sqlSave, hu.mutation, hu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -384,16 +446,7 @@ func (hu *HostUpdate) ExecX(ctx context.Context) { } func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: host.Table, - Columns: host.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(host.Table, host.Columns, sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID)) if ps := hu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -401,116 +454,73 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := hu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldHclID, - }) + if value, ok := hu.mutation.HCLID(); ok { + _spec.SetField(host.FieldHCLID, field.TypeString, value) } if value, ok := hu.mutation.Hostname(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldHostname, - }) + _spec.SetField(host.FieldHostname, field.TypeString, value) } if value, ok := hu.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldDescription, - }) + _spec.SetField(host.FieldDescription, field.TypeString, value) } if value, ok := hu.mutation.OS(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldOS, - }) + _spec.SetField(host.FieldOS, field.TypeString, value) } if value, ok := hu.mutation.LastOctet(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: host.FieldLastOctet, - }) + _spec.SetField(host.FieldLastOctet, field.TypeInt, value) } if value, ok := hu.mutation.AddedLastOctet(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: host.FieldLastOctet, - }) + _spec.AddField(host.FieldLastOctet, field.TypeInt, value) } if value, ok := hu.mutation.InstanceSize(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - 
Column: host.FieldInstanceSize, - }) + _spec.SetField(host.FieldInstanceSize, field.TypeString, value) } if value, ok := hu.mutation.AllowMACChanges(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: host.FieldAllowMACChanges, - }) + _spec.SetField(host.FieldAllowMACChanges, field.TypeBool, value) } if value, ok := hu.mutation.ExposedTCPPorts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldExposedTCPPorts, + _spec.SetField(host.FieldExposedTCPPorts, field.TypeJSON, value) + } + if value, ok := hu.mutation.AppendedExposedTCPPorts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldExposedTCPPorts, value) }) } if value, ok := hu.mutation.ExposedUDPPorts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldExposedUDPPorts, + _spec.SetField(host.FieldExposedUDPPorts, field.TypeJSON, value) + } + if value, ok := hu.mutation.AppendedExposedUDPPorts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldExposedUDPPorts, value) }) } if value, ok := hu.mutation.OverridePassword(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldOverridePassword, - }) + _spec.SetField(host.FieldOverridePassword, field.TypeString, value) } if value, ok := hu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldVars, - }) + _spec.SetField(host.FieldVars, field.TypeJSON, value) } if value, ok := hu.mutation.UserGroups(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldUserGroups, + _spec.SetField(host.FieldUserGroups, field.TypeJSON, value) + } + if value, ok := hu.mutation.AppendedUserGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldUserGroups, value) }) } if value, ok := hu.mutation.ProvisionSteps(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldProvisionSteps, + _spec.SetField(host.FieldProvisionSteps, field.TypeJSON, value) + } + if value, ok := hu.mutation.AppendedProvisionSteps(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldProvisionSteps, value) }) } if hu.mutation.ProvisionStepsCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Column: host.FieldProvisionSteps, - }) + _spec.ClearField(host.FieldProvisionSteps, field.TypeJSON) } if value, ok := hu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldTags, - }) + _spec.SetField(host.FieldTags, field.TypeJSON, value) } if hu.mutation.HostToDiskCleared() { edge := &sqlgraph.EdgeSpec{ @@ -520,10 +530,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToDiskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -536,10 +543,7 @@ 
func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToDiskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -555,10 +559,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -571,10 +572,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -590,10 +588,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -609,10 +604,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -625,10 +617,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.HostToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -644,10 +633,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -660,10 +646,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -679,10 +662,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -698,10 +678,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -714,10 +691,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -733,10 +707,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -752,10 +723,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -768,10 +736,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -787,10 +752,7 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -802,10 +764,11 @@ func (hu *HostUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{host.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + hu.mutation.done = true return n, nil } @@ -817,9 +780,17 @@ type HostUpdateOne struct { mutation *HostMutation } -// SetHclID sets the "hcl_id" field. -func (huo *HostUpdateOne) SetHclID(s string) *HostUpdateOne { - huo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (huo *HostUpdateOne) SetHCLID(s string) *HostUpdateOne { + huo.mutation.SetHCLID(s) + return huo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableHCLID(s *string) *HostUpdateOne { + if s != nil { + huo.SetHCLID(*s) + } return huo } @@ -829,18 +800,42 @@ func (huo *HostUpdateOne) SetHostname(s string) *HostUpdateOne { return huo } +// SetNillableHostname sets the "hostname" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableHostname(s *string) *HostUpdateOne { + if s != nil { + huo.SetHostname(*s) + } + return huo +} + // SetDescription sets the "description" field. 
func (huo *HostUpdateOne) SetDescription(s string) *HostUpdateOne { huo.mutation.SetDescription(s) return huo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableDescription(s *string) *HostUpdateOne { + if s != nil { + huo.SetDescription(*s) + } + return huo +} + // SetOS sets the "OS" field. func (huo *HostUpdateOne) SetOS(s string) *HostUpdateOne { huo.mutation.SetOS(s) return huo } +// SetNillableOS sets the "OS" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableOS(s *string) *HostUpdateOne { + if s != nil { + huo.SetOS(*s) + } + return huo +} + // SetLastOctet sets the "last_octet" field. func (huo *HostUpdateOne) SetLastOctet(i int) *HostUpdateOne { huo.mutation.ResetLastOctet() @@ -848,6 +843,14 @@ func (huo *HostUpdateOne) SetLastOctet(i int) *HostUpdateOne { return huo } +// SetNillableLastOctet sets the "last_octet" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableLastOctet(i *int) *HostUpdateOne { + if i != nil { + huo.SetLastOctet(*i) + } + return huo +} + // AddLastOctet adds i to the "last_octet" field. func (huo *HostUpdateOne) AddLastOctet(i int) *HostUpdateOne { huo.mutation.AddLastOctet(i) @@ -860,30 +863,66 @@ func (huo *HostUpdateOne) SetInstanceSize(s string) *HostUpdateOne { return huo } +// SetNillableInstanceSize sets the "instance_size" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableInstanceSize(s *string) *HostUpdateOne { + if s != nil { + huo.SetInstanceSize(*s) + } + return huo +} + // SetAllowMACChanges sets the "allow_mac_changes" field. func (huo *HostUpdateOne) SetAllowMACChanges(b bool) *HostUpdateOne { huo.mutation.SetAllowMACChanges(b) return huo } +// SetNillableAllowMACChanges sets the "allow_mac_changes" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableAllowMACChanges(b *bool) *HostUpdateOne { + if b != nil { + huo.SetAllowMACChanges(*b) + } + return huo +} + // SetExposedTCPPorts sets the "exposed_tcp_ports" field. func (huo *HostUpdateOne) SetExposedTCPPorts(s []string) *HostUpdateOne { huo.mutation.SetExposedTCPPorts(s) return huo } +// AppendExposedTCPPorts appends s to the "exposed_tcp_ports" field. +func (huo *HostUpdateOne) AppendExposedTCPPorts(s []string) *HostUpdateOne { + huo.mutation.AppendExposedTCPPorts(s) + return huo +} + // SetExposedUDPPorts sets the "exposed_udp_ports" field. func (huo *HostUpdateOne) SetExposedUDPPorts(s []string) *HostUpdateOne { huo.mutation.SetExposedUDPPorts(s) return huo } +// AppendExposedUDPPorts appends s to the "exposed_udp_ports" field. +func (huo *HostUpdateOne) AppendExposedUDPPorts(s []string) *HostUpdateOne { + huo.mutation.AppendExposedUDPPorts(s) + return huo +} + // SetOverridePassword sets the "override_password" field. func (huo *HostUpdateOne) SetOverridePassword(s string) *HostUpdateOne { huo.mutation.SetOverridePassword(s) return huo } +// SetNillableOverridePassword sets the "override_password" field if the given value is not nil. +func (huo *HostUpdateOne) SetNillableOverridePassword(s *string) *HostUpdateOne { + if s != nil { + huo.SetOverridePassword(*s) + } + return huo +} + // SetVars sets the "vars" field. func (huo *HostUpdateOne) SetVars(m map[string]string) *HostUpdateOne { huo.mutation.SetVars(m) @@ -896,12 +935,24 @@ func (huo *HostUpdateOne) SetUserGroups(s []string) *HostUpdateOne { return huo } +// AppendUserGroups appends s to the "user_groups" field. 
+func (huo *HostUpdateOne) AppendUserGroups(s []string) *HostUpdateOne { + huo.mutation.AppendUserGroups(s) + return huo +} + // SetProvisionSteps sets the "provision_steps" field. func (huo *HostUpdateOne) SetProvisionSteps(s []string) *HostUpdateOne { huo.mutation.SetProvisionSteps(s) return huo } +// AppendProvisionSteps appends s to the "provision_steps" field. +func (huo *HostUpdateOne) AppendProvisionSteps(s []string) *HostUpdateOne { + huo.mutation.AppendProvisionSteps(s) + return huo +} + // ClearProvisionSteps clears the value of the "provision_steps" field. func (huo *HostUpdateOne) ClearProvisionSteps() *HostUpdateOne { huo.mutation.ClearProvisionSteps() @@ -1113,6 +1164,12 @@ func (huo *HostUpdateOne) RemoveDependByHostToHostDependency(h ...*HostDependenc return huo.RemoveDependByHostToHostDependencyIDs(ids...) } +// Where appends a list predicates to the HostUpdate builder. +func (huo *HostUpdateOne) Where(ps ...predicate.Host) *HostUpdateOne { + huo.mutation.Where(ps...) + return huo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (huo *HostUpdateOne) Select(field string, fields ...string) *HostUpdateOne { @@ -1122,34 +1179,7 @@ func (huo *HostUpdateOne) Select(field string, fields ...string) *HostUpdateOne // Save executes the query and returns the updated Host entity. func (huo *HostUpdateOne) Save(ctx context.Context) (*Host, error) { - var ( - err error - node *Host - ) - if len(huo.hooks) == 0 { - node, err = huo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - huo.mutation = mutation - node, err = huo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(huo.hooks) - 1; i >= 0; i-- { - if huo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = huo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, huo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, huo.sqlSave, huo.mutation, huo.hooks) } // SaveX is like Save, but panics if an error occurs. 
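Editorial note (not part of the patch): the hunks above add nillable setters, JSON append helpers, and a Where method to the Host update builders. The snippet below is a minimal usage sketch under assumptions — the package name, the hostID/desc arguments, and the example port value are illustrative, not taken from this diff; only the builder methods themselves come from the generated code changed here.

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// updateHost sketches the new setters on HostUpdateOne.
// SetNillableDescription writes the column only when desc is non-nil, so optional
// inputs can be passed through directly; AppendExposedTCPPorts emits a
// sqljson.Append modifier instead of replacing the stored JSON array.
func updateHost(ctx context.Context, client *ent.Client, hostID uuid.UUID, desc *string) error {
	return client.Host.UpdateOneID(hostID).
		SetNillableDescription(desc).
		AppendExposedTCPPorts([]string{"8080/tcp"}). // example value, assumption
		Exec(ctx)
}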
@@ -1175,16 +1205,7 @@ func (huo *HostUpdateOne) ExecX(ctx context.Context) { } func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: host.Table, - Columns: host.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(host.Table, host.Columns, sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID)) id, ok := huo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Host.id" for update`)} @@ -1209,116 +1230,73 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) } } } - if value, ok := huo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldHclID, - }) + if value, ok := huo.mutation.HCLID(); ok { + _spec.SetField(host.FieldHCLID, field.TypeString, value) } if value, ok := huo.mutation.Hostname(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldHostname, - }) + _spec.SetField(host.FieldHostname, field.TypeString, value) } if value, ok := huo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldDescription, - }) + _spec.SetField(host.FieldDescription, field.TypeString, value) } if value, ok := huo.mutation.OS(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldOS, - }) + _spec.SetField(host.FieldOS, field.TypeString, value) } if value, ok := huo.mutation.LastOctet(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: host.FieldLastOctet, - }) + _spec.SetField(host.FieldLastOctet, field.TypeInt, value) } if value, ok := huo.mutation.AddedLastOctet(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: host.FieldLastOctet, - }) + _spec.AddField(host.FieldLastOctet, field.TypeInt, value) } if value, ok := huo.mutation.InstanceSize(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldInstanceSize, - }) + _spec.SetField(host.FieldInstanceSize, field.TypeString, value) } if value, ok := huo.mutation.AllowMACChanges(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: host.FieldAllowMACChanges, - }) + _spec.SetField(host.FieldAllowMACChanges, field.TypeBool, value) } if value, ok := huo.mutation.ExposedTCPPorts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldExposedTCPPorts, + _spec.SetField(host.FieldExposedTCPPorts, field.TypeJSON, value) + } + if value, ok := huo.mutation.AppendedExposedTCPPorts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldExposedTCPPorts, value) }) } if value, ok := huo.mutation.ExposedUDPPorts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldExposedUDPPorts, + _spec.SetField(host.FieldExposedUDPPorts, field.TypeJSON, value) + } + if value, ok := 
huo.mutation.AppendedExposedUDPPorts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldExposedUDPPorts, value) }) } if value, ok := huo.mutation.OverridePassword(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: host.FieldOverridePassword, - }) + _spec.SetField(host.FieldOverridePassword, field.TypeString, value) } if value, ok := huo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldVars, - }) + _spec.SetField(host.FieldVars, field.TypeJSON, value) } if value, ok := huo.mutation.UserGroups(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldUserGroups, + _spec.SetField(host.FieldUserGroups, field.TypeJSON, value) + } + if value, ok := huo.mutation.AppendedUserGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldUserGroups, value) }) } if value, ok := huo.mutation.ProvisionSteps(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldProvisionSteps, + _spec.SetField(host.FieldProvisionSteps, field.TypeJSON, value) + } + if value, ok := huo.mutation.AppendedProvisionSteps(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, host.FieldProvisionSteps, value) }) } if huo.mutation.ProvisionStepsCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Column: host.FieldProvisionSteps, - }) + _spec.ClearField(host.FieldProvisionSteps, field.TypeJSON) } if value, ok := huo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: host.FieldTags, - }) + _spec.SetField(host.FieldTags, field.TypeJSON, value) } if huo.mutation.HostToDiskCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1328,10 +1306,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToDiskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1344,10 +1319,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToDiskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: disk.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(disk.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1363,10 +1335,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1379,10 +1348,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } 
for _, k := range nodes { @@ -1398,10 +1364,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1417,10 +1380,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1433,10 +1393,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.HostToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1452,10 +1409,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1468,10 +1422,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1487,10 +1438,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: host.HostToIncludedNetworkPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1506,10 +1454,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1522,10 +1467,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1541,10 +1483,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.DependOnHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1560,10 +1499,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1576,10 +1512,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1595,10 +1528,7 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) Columns: []string{host.DependByHostToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1613,9 +1543,10 @@ func (huo *HostUpdateOne) sqlSave(ctx context.Context) (_node *Host, err error) if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{host.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + huo.mutation.done = true return _node, nil } diff --git a/ent/hostdependency.go b/ent/hostdependency.go index 2131e606..68df06fc 100755 --- a/ent/hostdependency.go +++ b/ent/hostdependency.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/host" @@ -27,6 +28,7 @@ type HostDependency struct { // The values are being populated by the HostDependencyQuery when eager-loading is set. Edges HostDependencyEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // HostDependencyToDependOnHost holds the value of the HostDependencyToDependOnHost edge. HCLHostDependencyToDependOnHost *Host `json:"HostDependencyToDependOnHost,omitempty"` @@ -36,11 +38,12 @@ type HostDependency struct { HCLHostDependencyToNetwork *Network `json:"HostDependencyToNetwork,omitempty"` // HostDependencyToEnvironment holds the value of the HostDependencyToEnvironment edge. HCLHostDependencyToEnvironment *Environment `json:"HostDependencyToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_host_dependency *uuid.UUID host_dependency_host_dependency_to_depend_on_host *uuid.UUID host_dependency_host_dependency_to_depend_by_host *uuid.UUID host_dependency_host_dependency_to_network *uuid.UUID + selectValues sql.SelectValues } // HostDependencyEdges holds the relations/edges for other nodes in the graph. @@ -56,6 +59,8 @@ type HostDependencyEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [4]bool + // totalCount holds the count of the edges above. 
+ totalCount [4]map[string]int } // HostDependencyToDependOnHostOrErr returns the HostDependencyToDependOnHost value or an error if the edge @@ -63,8 +68,7 @@ type HostDependencyEdges struct { func (e HostDependencyEdges) HostDependencyToDependOnHostOrErr() (*Host, error) { if e.loadedTypes[0] { if e.HostDependencyToDependOnHost == nil { - // The edge HostDependencyToDependOnHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: host.Label} } return e.HostDependencyToDependOnHost, nil @@ -77,8 +81,7 @@ func (e HostDependencyEdges) HostDependencyToDependOnHostOrErr() (*Host, error) func (e HostDependencyEdges) HostDependencyToDependByHostOrErr() (*Host, error) { if e.loadedTypes[1] { if e.HostDependencyToDependByHost == nil { - // The edge HostDependencyToDependByHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: host.Label} } return e.HostDependencyToDependByHost, nil @@ -91,8 +94,7 @@ func (e HostDependencyEdges) HostDependencyToDependByHostOrErr() (*Host, error) func (e HostDependencyEdges) HostDependencyToNetworkOrErr() (*Network, error) { if e.loadedTypes[2] { if e.HostDependencyToNetwork == nil { - // The edge HostDependencyToNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: network.Label} } return e.HostDependencyToNetwork, nil @@ -105,8 +107,7 @@ func (e HostDependencyEdges) HostDependencyToNetworkOrErr() (*Network, error) { func (e HostDependencyEdges) HostDependencyToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[3] { if e.HostDependencyToEnvironment == nil { - // The edge HostDependencyToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.HostDependencyToEnvironment, nil @@ -115,8 +116,8 @@ func (e HostDependencyEdges) HostDependencyToEnvironmentOrErr() (*Environment, e } // scanValues returns the types for scanning values from sql.Rows. -func (*HostDependency) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*HostDependency) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case hostdependency.FieldHostID, hostdependency.FieldNetworkID: @@ -132,7 +133,7 @@ func (*HostDependency) scanValues(columns []string) ([]interface{}, error) { case hostdependency.ForeignKeys[3]: // host_dependency_host_dependency_to_network values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type HostDependency", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -140,7 +141,7 @@ func (*HostDependency) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the HostDependency fields. 
-func (hd *HostDependency) assignValues(columns []string, values []interface{}) error { +func (hd *HostDependency) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -192,46 +193,54 @@ func (hd *HostDependency) assignValues(columns []string, values []interface{}) e hd.host_dependency_host_dependency_to_network = new(uuid.UUID) *hd.host_dependency_host_dependency_to_network = *value.S.(*uuid.UUID) } + default: + hd.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the HostDependency. +// This includes values selected through modifiers, order, etc. +func (hd *HostDependency) Value(name string) (ent.Value, error) { + return hd.selectValues.Get(name) +} + // QueryHostDependencyToDependOnHost queries the "HostDependencyToDependOnHost" edge of the HostDependency entity. func (hd *HostDependency) QueryHostDependencyToDependOnHost() *HostQuery { - return (&HostDependencyClient{config: hd.config}).QueryHostDependencyToDependOnHost(hd) + return NewHostDependencyClient(hd.config).QueryHostDependencyToDependOnHost(hd) } // QueryHostDependencyToDependByHost queries the "HostDependencyToDependByHost" edge of the HostDependency entity. func (hd *HostDependency) QueryHostDependencyToDependByHost() *HostQuery { - return (&HostDependencyClient{config: hd.config}).QueryHostDependencyToDependByHost(hd) + return NewHostDependencyClient(hd.config).QueryHostDependencyToDependByHost(hd) } // QueryHostDependencyToNetwork queries the "HostDependencyToNetwork" edge of the HostDependency entity. func (hd *HostDependency) QueryHostDependencyToNetwork() *NetworkQuery { - return (&HostDependencyClient{config: hd.config}).QueryHostDependencyToNetwork(hd) + return NewHostDependencyClient(hd.config).QueryHostDependencyToNetwork(hd) } // QueryHostDependencyToEnvironment queries the "HostDependencyToEnvironment" edge of the HostDependency entity. func (hd *HostDependency) QueryHostDependencyToEnvironment() *EnvironmentQuery { - return (&HostDependencyClient{config: hd.config}).QueryHostDependencyToEnvironment(hd) + return NewHostDependencyClient(hd.config).QueryHostDependencyToEnvironment(hd) } // Update returns a builder for updating this HostDependency. // Note that you need to call HostDependency.Unwrap() before calling this method if this HostDependency // was returned from a transaction, and the transaction was committed or rolled back. func (hd *HostDependency) Update() *HostDependencyUpdateOne { - return (&HostDependencyClient{config: hd.config}).UpdateOne(hd) + return NewHostDependencyClient(hd.config).UpdateOne(hd) } // Unwrap unwraps the HostDependency entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (hd *HostDependency) Unwrap() *HostDependency { - tx, ok := hd.config.driver.(*txDriver) + _tx, ok := hd.config.driver.(*txDriver) if !ok { panic("ent: HostDependency is not a transactional entity") } - hd.config.driver = tx.drv + hd.config.driver = _tx.drv return hd } @@ -239,10 +248,11 @@ func (hd *HostDependency) Unwrap() *HostDependency { func (hd *HostDependency) String() string { var builder strings.Builder builder.WriteString("HostDependency(") - builder.WriteString(fmt.Sprintf("id=%v", hd.ID)) - builder.WriteString(", host_id=") + builder.WriteString(fmt.Sprintf("id=%v, ", hd.ID)) + builder.WriteString("host_id=") builder.WriteString(hd.HostID) - builder.WriteString(", network_id=") + builder.WriteString(", ") + builder.WriteString("network_id=") builder.WriteString(hd.NetworkID) builder.WriteByte(')') return builder.String() @@ -250,9 +260,3 @@ func (hd *HostDependency) String() string { // HostDependencies is a parsable slice of HostDependency. type HostDependencies []*HostDependency - -func (hd HostDependencies) config(cfg config) { - for _i := range hd { - hd[_i].config = cfg - } -} diff --git a/ent/hostdependency/hostdependency.go b/ent/hostdependency/hostdependency.go index bfbe6adb..bb3306b2 100755 --- a/ent/hostdependency/hostdependency.go +++ b/ent/hostdependency/hostdependency.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package hostdependency import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -90,3 +92,77 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the HostDependency queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHostID orders the results by the host_id field. +func ByHostID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHostID, opts...).ToFunc() +} + +// ByNetworkID orders the results by the network_id field. +func ByNetworkID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNetworkID, opts...).ToFunc() +} + +// ByHostDependencyToDependOnHostField orders the results by HostDependencyToDependOnHost field. +func ByHostDependencyToDependOnHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostDependencyToDependOnHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByHostDependencyToDependByHostField orders the results by HostDependencyToDependByHost field. +func ByHostDependencyToDependByHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostDependencyToDependByHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByHostDependencyToNetworkField orders the results by HostDependencyToNetwork field. +func ByHostDependencyToNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostDependencyToNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByHostDependencyToEnvironmentField orders the results by HostDependencyToEnvironment field. 
+func ByHostDependencyToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHostDependencyToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newHostDependencyToDependOnHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostDependencyToDependOnHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToDependOnHostTable, HostDependencyToDependOnHostColumn), + ) +} +func newHostDependencyToDependByHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostDependencyToDependByHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToDependByHostTable, HostDependencyToDependByHostColumn), + ) +} +func newHostDependencyToNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostDependencyToNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToNetworkTable, HostDependencyToNetworkColumn), + ) +} +func newHostDependencyToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HostDependencyToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, HostDependencyToEnvironmentTable, HostDependencyToEnvironmentColumn), + ) +} diff --git a/ent/hostdependency/where.go b/ent/hostdependency/where.go index 71b2a156..016afa10 100755 --- a/ent/hostdependency/where.go +++ b/ent/hostdependency/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package hostdependency @@ -11,321 +11,187 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.HostDependency(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.HostDependency(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.HostDependency(sql.FieldLTE(FieldID, id)) } // HostID applies equality check predicate on the "host_id" field. It's identical to HostIDEQ. func HostID(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldEQ(FieldHostID, v)) } // NetworkID applies equality check predicate on the "network_id" field. It's identical to NetworkIDEQ. func NetworkID(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldEQ(FieldNetworkID, v)) } // HostIDEQ applies the EQ predicate on the "host_id" field. func HostIDEQ(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldEQ(FieldHostID, v)) } // HostIDNEQ applies the NEQ predicate on the "host_id" field. func HostIDNEQ(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldNEQ(FieldHostID, v)) } // HostIDIn applies the In predicate on the "host_id" field. func HostIDIn(vs ...string) predicate.HostDependency { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.HostDependency(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHostID), v...)) - }) + return predicate.HostDependency(sql.FieldIn(FieldHostID, vs...)) } // HostIDNotIn applies the NotIn predicate on the "host_id" field. func HostIDNotIn(vs ...string) predicate.HostDependency { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.HostDependency(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHostID), v...)) - }) + return predicate.HostDependency(sql.FieldNotIn(FieldHostID, vs...)) } // HostIDGT applies the GT predicate on the "host_id" field. func HostIDGT(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldGT(FieldHostID, v)) } // HostIDGTE applies the GTE predicate on the "host_id" field. func HostIDGTE(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldGTE(FieldHostID, v)) } // HostIDLT applies the LT predicate on the "host_id" field. func HostIDLT(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldLT(FieldHostID, v)) } // HostIDLTE applies the LTE predicate on the "host_id" field. func HostIDLTE(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldLTE(FieldHostID, v)) } // HostIDContains applies the Contains predicate on the "host_id" field. func HostIDContains(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldContains(FieldHostID, v)) } // HostIDHasPrefix applies the HasPrefix predicate on the "host_id" field. func HostIDHasPrefix(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldHasPrefix(FieldHostID, v)) } // HostIDHasSuffix applies the HasSuffix predicate on the "host_id" field. func HostIDHasSuffix(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldHasSuffix(FieldHostID, v)) } // HostIDEqualFold applies the EqualFold predicate on the "host_id" field. func HostIDEqualFold(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldEqualFold(FieldHostID, v)) } // HostIDContainsFold applies the ContainsFold predicate on the "host_id" field. func HostIDContainsFold(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHostID), v)) - }) + return predicate.HostDependency(sql.FieldContainsFold(FieldHostID, v)) } // NetworkIDEQ applies the EQ predicate on the "network_id" field. func NetworkIDEQ(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldEQ(FieldNetworkID, v)) } // NetworkIDNEQ applies the NEQ predicate on the "network_id" field. func NetworkIDNEQ(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldNEQ(FieldNetworkID, v)) } // NetworkIDIn applies the In predicate on the "network_id" field. 
func NetworkIDIn(vs ...string) predicate.HostDependency { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.HostDependency(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldNetworkID), v...)) - }) + return predicate.HostDependency(sql.FieldIn(FieldNetworkID, vs...)) } // NetworkIDNotIn applies the NotIn predicate on the "network_id" field. func NetworkIDNotIn(vs ...string) predicate.HostDependency { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.HostDependency(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldNetworkID), v...)) - }) + return predicate.HostDependency(sql.FieldNotIn(FieldNetworkID, vs...)) } // NetworkIDGT applies the GT predicate on the "network_id" field. func NetworkIDGT(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldGT(FieldNetworkID, v)) } // NetworkIDGTE applies the GTE predicate on the "network_id" field. func NetworkIDGTE(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldGTE(FieldNetworkID, v)) } // NetworkIDLT applies the LT predicate on the "network_id" field. func NetworkIDLT(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldLT(FieldNetworkID, v)) } // NetworkIDLTE applies the LTE predicate on the "network_id" field. func NetworkIDLTE(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldLTE(FieldNetworkID, v)) } // NetworkIDContains applies the Contains predicate on the "network_id" field. func NetworkIDContains(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldContains(FieldNetworkID, v)) } // NetworkIDHasPrefix applies the HasPrefix predicate on the "network_id" field. func NetworkIDHasPrefix(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldHasPrefix(FieldNetworkID, v)) } // NetworkIDHasSuffix applies the HasSuffix predicate on the "network_id" field. func NetworkIDHasSuffix(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldHasSuffix(FieldNetworkID, v)) } // NetworkIDEqualFold applies the EqualFold predicate on the "network_id" field. 
func NetworkIDEqualFold(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldEqualFold(FieldNetworkID, v)) } // NetworkIDContainsFold applies the ContainsFold predicate on the "network_id" field. func NetworkIDContainsFold(v string) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldNetworkID), v)) - }) + return predicate.HostDependency(sql.FieldContainsFold(FieldNetworkID, v)) } // HasHostDependencyToDependOnHost applies the HasEdge predicate on the "HostDependencyToDependOnHost" edge. @@ -333,7 +199,6 @@ func HasHostDependencyToDependOnHost() predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToDependOnHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToDependOnHostTable, HostDependencyToDependOnHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -343,11 +208,7 @@ func HasHostDependencyToDependOnHost() predicate.HostDependency { // HasHostDependencyToDependOnHostWith applies the HasEdge predicate on the "HostDependencyToDependOnHost" edge with a given conditions (other predicates). func HasHostDependencyToDependOnHostWith(preds ...predicate.Host) predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToDependOnHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToDependOnHostTable, HostDependencyToDependOnHostColumn), - ) + step := newHostDependencyToDependOnHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -361,7 +222,6 @@ func HasHostDependencyToDependByHost() predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToDependByHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToDependByHostTable, HostDependencyToDependByHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -371,11 +231,7 @@ func HasHostDependencyToDependByHost() predicate.HostDependency { // HasHostDependencyToDependByHostWith applies the HasEdge predicate on the "HostDependencyToDependByHost" edge with a given conditions (other predicates). 
func HasHostDependencyToDependByHostWith(preds ...predicate.Host) predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToDependByHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToDependByHostTable, HostDependencyToDependByHostColumn), - ) + step := newHostDependencyToDependByHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -389,7 +245,6 @@ func HasHostDependencyToNetwork() predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToNetworkTable, HostDependencyToNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -399,11 +254,7 @@ func HasHostDependencyToNetwork() predicate.HostDependency { // HasHostDependencyToNetworkWith applies the HasEdge predicate on the "HostDependencyToNetwork" edge with a given conditions (other predicates). func HasHostDependencyToNetworkWith(preds ...predicate.Network) predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, HostDependencyToNetworkTable, HostDependencyToNetworkColumn), - ) + step := newHostDependencyToNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -417,7 +268,6 @@ func HasHostDependencyToEnvironment() predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, HostDependencyToEnvironmentTable, HostDependencyToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -427,11 +277,7 @@ func HasHostDependencyToEnvironment() predicate.HostDependency { // HasHostDependencyToEnvironmentWith applies the HasEdge predicate on the "HostDependencyToEnvironment" edge with a given conditions (other predicates). func HasHostDependencyToEnvironmentWith(preds ...predicate.Environment) predicate.HostDependency { return predicate.HostDependency(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(HostDependencyToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, HostDependencyToEnvironmentTable, HostDependencyToEnvironmentColumn), - ) + step := newHostDependencyToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -442,32 +288,15 @@ func HasHostDependencyToEnvironmentWith(preds ...predicate.Environment) predicat // And groups predicates with the AND operator between them. func And(predicates ...predicate.HostDependency) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.HostDependency(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.HostDependency) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.HostDependency(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.HostDependency) predicate.HostDependency { - return predicate.HostDependency(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.HostDependency(sql.NotPredicates(p)) } diff --git a/ent/hostdependency_create.go b/ent/hostdependency_create.go index aca4193f..6e24f873 100755 --- a/ent/hostdependency_create.go +++ b/ent/hostdependency_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -132,44 +132,8 @@ func (hdc *HostDependencyCreate) Mutation() *HostDependencyMutation { // Save creates the HostDependency in the database. func (hdc *HostDependencyCreate) Save(ctx context.Context) (*HostDependency, error) { - var ( - err error - node *HostDependency - ) hdc.defaults() - if len(hdc.hooks) == 0 { - if err = hdc.check(); err != nil { - return nil, err - } - node, err = hdc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostDependencyMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = hdc.check(); err != nil { - return nil, err - } - hdc.mutation = mutation - if node, err = hdc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(hdc.hooks) - 1; i >= 0; i-- { - if hdc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hdc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hdc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, hdc.sqlSave, hdc.mutation, hdc.hooks) } // SaveX calls Save and panics if Save returns an error. 
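Editorial note (not part of the diff): from here the same refactor is applied to the HostDependency builders. Save on HostDependencyCreate delegates to withHooks, field validation moves into sqlSave (next hunk), and the hand-built FieldSpec/EdgeSpec literals are replaced by the NewCreateSpec/NewFieldSpec constructors. A rough, hypothetical usage sketch follows; SetHostDependencyToEnvironmentID is the name ent conventionally derives for the unique HostDependencyToEnvironment edge and is an assumption here, as are client, ctx, envID, and the string IDs.

    package example

    import (
    	"context"

    	"github.com/gen0cide/laforge/ent"
    	"github.com/google/uuid"
    )

    // createDependency sketches the regenerated HostDependencyCreate builder.
    func createDependency(ctx context.Context, client *ent.Client, envID uuid.UUID) (*ent.HostDependency, error) {
    	return client.HostDependency.Create().
    		SetHostID("web01").                      // plain string fields from the schema
    		SetNetworkID("corp-lan").
    		SetHostDependencyToEnvironmentID(envID). // assumed edge setter for the owning environment (M2O)
    		Save(ctx)                                // check() now runs inside sqlSave, wrapped by withHooks
    }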
@@ -214,10 +178,13 @@ func (hdc *HostDependencyCreate) check() error { } func (hdc *HostDependencyCreate) sqlSave(ctx context.Context) (*HostDependency, error) { + if err := hdc.check(); err != nil { + return nil, err + } _node, _spec := hdc.createSpec() if err := sqlgraph.CreateNode(ctx, hdc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -228,38 +195,26 @@ func (hdc *HostDependencyCreate) sqlSave(ctx context.Context) (*HostDependency, return nil, err } } + hdc.mutation.id = &_node.ID + hdc.mutation.done = true return _node, nil } func (hdc *HostDependencyCreate) createSpec() (*HostDependency, *sqlgraph.CreateSpec) { var ( _node = &HostDependency{config: hdc.config} - _spec = &sqlgraph.CreateSpec{ - Table: hostdependency.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(hostdependency.Table, sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID)) ) if id, ok := hdc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := hdc.mutation.HostID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: hostdependency.FieldHostID, - }) + _spec.SetField(hostdependency.FieldHostID, field.TypeString, value) _node.HostID = value } if value, ok := hdc.mutation.NetworkID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: hostdependency.FieldNetworkID, - }) + _spec.SetField(hostdependency.FieldNetworkID, field.TypeString, value) _node.NetworkID = value } if nodes := hdc.mutation.HostDependencyToDependOnHostIDs(); len(nodes) > 0 { @@ -270,10 +225,7 @@ func (hdc *HostDependencyCreate) createSpec() (*HostDependency, *sqlgraph.Create Columns: []string{hostdependency.HostDependencyToDependOnHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -290,10 +242,7 @@ func (hdc *HostDependencyCreate) createSpec() (*HostDependency, *sqlgraph.Create Columns: []string{hostdependency.HostDependencyToDependByHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -310,10 +259,7 @@ func (hdc *HostDependencyCreate) createSpec() (*HostDependency, *sqlgraph.Create Columns: []string{hostdependency.HostDependencyToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -330,10 +276,7 @@ func (hdc *HostDependencyCreate) createSpec() (*HostDependency, *sqlgraph.Create Columns: []string{hostdependency.HostDependencyToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -348,11 +291,15 @@ func (hdc *HostDependencyCreate) createSpec() (*HostDependency, *sqlgraph.Create // HostDependencyCreateBulk is the builder for 
creating many HostDependency entities in bulk. type HostDependencyCreateBulk struct { config + err error builders []*HostDependencyCreate } // Save creates the HostDependency entities in the database. func (hdcb *HostDependencyCreateBulk) Save(ctx context.Context) ([]*HostDependency, error) { + if hdcb.err != nil { + return nil, hdcb.err + } specs := make([]*sqlgraph.CreateSpec, len(hdcb.builders)) nodes := make([]*HostDependency, len(hdcb.builders)) mutators := make([]Mutator, len(hdcb.builders)) @@ -369,8 +316,8 @@ func (hdcb *HostDependencyCreateBulk) Save(ctx context.Context) ([]*HostDependen return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, hdcb.builders[i+1].mutation) } else { @@ -378,7 +325,7 @@ func (hdcb *HostDependencyCreateBulk) Save(ctx context.Context) ([]*HostDependen // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, hdcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/hostdependency_delete.go b/ent/hostdependency_delete.go index 06501668..45bc0bc1 100755 --- a/ent/hostdependency_delete.go +++ b/ent/hostdependency_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (hdd *HostDependencyDelete) Where(ps ...predicate.HostDependency) *HostDepe // Exec executes the deletion query and returns how many vertices were deleted. func (hdd *HostDependencyDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(hdd.hooks) == 0 { - affected, err = hdd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostDependencyMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - hdd.mutation = mutation - affected, err = hdd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(hdd.hooks) - 1; i >= 0; i-- { - if hdd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hdd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hdd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, hdd.sqlExec, hdd.mutation, hdd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
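Editorial note (not part of the diff): the delete builder gets the same treatment just below. Exec routes through withHooks, sqlExec is rebuilt on NewDeleteSpec, and constraint errors are wrapped before being returned. For reference, a hedged sketch of a predicate-based bulk delete against the regenerated API; client and ctx are placeholders, and HostIDEQ is the predicate from the rewritten where.go shown earlier.

    package example

    import (
    	"context"

    	"github.com/gen0cide/laforge/ent"
    	"github.com/gen0cide/laforge/ent/hostdependency"
    )

    // deleteDependenciesFor sketches a predicate-based bulk delete with the regenerated builder.
    func deleteDependenciesFor(ctx context.Context, client *ent.Client) (int, error) {
    	// Exec reports how many rows were removed; constraint violations now come back
    	// wrapped as *ent.ConstraintError by the rewritten sqlExec.
    	return client.HostDependency.Delete().
    		Where(hostdependency.HostIDEQ("web01")).
    		Exec(ctx)
    }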
@@ -68,15 +40,7 @@ func (hdd *HostDependencyDelete) ExecX(ctx context.Context) int { } func (hdd *HostDependencyDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: hostdependency.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(hostdependency.Table, sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID)) if ps := hdd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (hdd *HostDependencyDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, hdd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, hdd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + hdd.mutation.done = true + return affected, err } // HostDependencyDeleteOne is the builder for deleting a single HostDependency entity. @@ -92,6 +61,12 @@ type HostDependencyDeleteOne struct { hdd *HostDependencyDelete } +// Where appends a list predicates to the HostDependencyDelete builder. +func (hddo *HostDependencyDeleteOne) Where(ps ...predicate.HostDependency) *HostDependencyDeleteOne { + hddo.hdd.mutation.Where(ps...) + return hddo +} + // Exec executes the deletion query. func (hddo *HostDependencyDeleteOne) Exec(ctx context.Context) error { n, err := hddo.hdd.Exec(ctx) @@ -107,5 +82,7 @@ func (hddo *HostDependencyDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (hddo *HostDependencyDeleteOne) ExecX(ctx context.Context) { - hddo.hdd.ExecX(ctx) + if err := hddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/hostdependency_query.go b/ent/hostdependency_query.go index a7b3b6ed..1e644f0e 100755 --- a/ent/hostdependency_query.go +++ b/ent/hostdependency_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -22,18 +21,17 @@ import ( // HostDependencyQuery is the builder for querying HostDependency entities. type HostDependencyQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.HostDependency - // eager-loading edges. + ctx *QueryContext + order []hostdependency.OrderOption + inters []Interceptor + predicates []predicate.HostDependency withHostDependencyToDependOnHost *HostQuery withHostDependencyToDependByHost *HostQuery withHostDependencyToNetwork *NetworkQuery withHostDependencyToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*HostDependency) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -45,34 +43,34 @@ func (hdq *HostDependencyQuery) Where(ps ...predicate.HostDependency) *HostDepen return hdq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (hdq *HostDependencyQuery) Limit(limit int) *HostDependencyQuery { - hdq.limit = &limit + hdq.ctx.Limit = &limit return hdq } -// Offset adds an offset step to the query. +// Offset to start from. 
func (hdq *HostDependencyQuery) Offset(offset int) *HostDependencyQuery { - hdq.offset = &offset + hdq.ctx.Offset = &offset return hdq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (hdq *HostDependencyQuery) Unique(unique bool) *HostDependencyQuery { - hdq.unique = &unique + hdq.ctx.Unique = &unique return hdq } -// Order adds an order step to the query. -func (hdq *HostDependencyQuery) Order(o ...OrderFunc) *HostDependencyQuery { +// Order specifies how the records should be ordered. +func (hdq *HostDependencyQuery) Order(o ...hostdependency.OrderOption) *HostDependencyQuery { hdq.order = append(hdq.order, o...) return hdq } // QueryHostDependencyToDependOnHost chains the current query on the "HostDependencyToDependOnHost" edge. func (hdq *HostDependencyQuery) QueryHostDependencyToDependOnHost() *HostQuery { - query := &HostQuery{config: hdq.config} + query := (&HostClient{config: hdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hdq.prepareQuery(ctx); err != nil { return nil, err @@ -94,7 +92,7 @@ func (hdq *HostDependencyQuery) QueryHostDependencyToDependOnHost() *HostQuery { // QueryHostDependencyToDependByHost chains the current query on the "HostDependencyToDependByHost" edge. func (hdq *HostDependencyQuery) QueryHostDependencyToDependByHost() *HostQuery { - query := &HostQuery{config: hdq.config} + query := (&HostClient{config: hdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hdq.prepareQuery(ctx); err != nil { return nil, err @@ -116,7 +114,7 @@ func (hdq *HostDependencyQuery) QueryHostDependencyToDependByHost() *HostQuery { // QueryHostDependencyToNetwork chains the current query on the "HostDependencyToNetwork" edge. func (hdq *HostDependencyQuery) QueryHostDependencyToNetwork() *NetworkQuery { - query := &NetworkQuery{config: hdq.config} + query := (&NetworkClient{config: hdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hdq.prepareQuery(ctx); err != nil { return nil, err @@ -138,7 +136,7 @@ func (hdq *HostDependencyQuery) QueryHostDependencyToNetwork() *NetworkQuery { // QueryHostDependencyToEnvironment chains the current query on the "HostDependencyToEnvironment" edge. func (hdq *HostDependencyQuery) QueryHostDependencyToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: hdq.config} + query := (&EnvironmentClient{config: hdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := hdq.prepareQuery(ctx); err != nil { return nil, err @@ -161,7 +159,7 @@ func (hdq *HostDependencyQuery) QueryHostDependencyToEnvironment() *EnvironmentQ // First returns the first HostDependency entity from the query. // Returns a *NotFoundError when no HostDependency was found. func (hdq *HostDependencyQuery) First(ctx context.Context) (*HostDependency, error) { - nodes, err := hdq.Limit(1).All(ctx) + nodes, err := hdq.Limit(1).All(setContextOp(ctx, hdq.ctx, "First")) if err != nil { return nil, err } @@ -184,7 +182,7 @@ func (hdq *HostDependencyQuery) FirstX(ctx context.Context) *HostDependency { // Returns a *NotFoundError when no HostDependency ID was found. 
func (hdq *HostDependencyQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = hdq.Limit(1).IDs(ctx); err != nil { + if ids, err = hdq.Limit(1).IDs(setContextOp(ctx, hdq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -207,7 +205,7 @@ func (hdq *HostDependencyQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one HostDependency entity is found. // Returns a *NotFoundError when no HostDependency entities are found. func (hdq *HostDependencyQuery) Only(ctx context.Context) (*HostDependency, error) { - nodes, err := hdq.Limit(2).All(ctx) + nodes, err := hdq.Limit(2).All(setContextOp(ctx, hdq.ctx, "Only")) if err != nil { return nil, err } @@ -235,7 +233,7 @@ func (hdq *HostDependencyQuery) OnlyX(ctx context.Context) *HostDependency { // Returns a *NotFoundError when no entities are found. func (hdq *HostDependencyQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = hdq.Limit(2).IDs(ctx); err != nil { + if ids, err = hdq.Limit(2).IDs(setContextOp(ctx, hdq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -260,10 +258,12 @@ func (hdq *HostDependencyQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of HostDependencies. func (hdq *HostDependencyQuery) All(ctx context.Context) ([]*HostDependency, error) { + ctx = setContextOp(ctx, hdq.ctx, "All") if err := hdq.prepareQuery(ctx); err != nil { return nil, err } - return hdq.sqlAll(ctx) + qr := querierAll[[]*HostDependency, *HostDependencyQuery]() + return withInterceptors[[]*HostDependency](ctx, hdq, qr, hdq.inters) } // AllX is like All, but panics if an error occurs. @@ -276,9 +276,12 @@ func (hdq *HostDependencyQuery) AllX(ctx context.Context) []*HostDependency { } // IDs executes the query and returns a list of HostDependency IDs. -func (hdq *HostDependencyQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := hdq.Select(hostdependency.FieldID).Scan(ctx, &ids); err != nil { +func (hdq *HostDependencyQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if hdq.ctx.Unique == nil && hdq.path != nil { + hdq.Unique(true) + } + ctx = setContextOp(ctx, hdq.ctx, "IDs") + if err = hdq.Select(hostdependency.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -295,10 +298,11 @@ func (hdq *HostDependencyQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (hdq *HostDependencyQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, hdq.ctx, "Count") if err := hdq.prepareQuery(ctx); err != nil { return 0, err } - return hdq.sqlCount(ctx) + return withInterceptors[int](ctx, hdq, querierCount[*HostDependencyQuery](), hdq.inters) } // CountX is like Count, but panics if an error occurs. @@ -312,10 +316,15 @@ func (hdq *HostDependencyQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (hdq *HostDependencyQuery) Exist(ctx context.Context) (bool, error) { - if err := hdq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, hdq.ctx, "Exist") + switch _, err := hdq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return hdq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -335,25 +344,24 @@ func (hdq *HostDependencyQuery) Clone() *HostDependencyQuery { } return &HostDependencyQuery{ config: hdq.config, - limit: hdq.limit, - offset: hdq.offset, - order: append([]OrderFunc{}, hdq.order...), + ctx: hdq.ctx.Clone(), + order: append([]hostdependency.OrderOption{}, hdq.order...), + inters: append([]Interceptor{}, hdq.inters...), predicates: append([]predicate.HostDependency{}, hdq.predicates...), withHostDependencyToDependOnHost: hdq.withHostDependencyToDependOnHost.Clone(), withHostDependencyToDependByHost: hdq.withHostDependencyToDependByHost.Clone(), withHostDependencyToNetwork: hdq.withHostDependencyToNetwork.Clone(), withHostDependencyToEnvironment: hdq.withHostDependencyToEnvironment.Clone(), // clone intermediate query. - sql: hdq.sql.Clone(), - path: hdq.path, - unique: hdq.unique, + sql: hdq.sql.Clone(), + path: hdq.path, } } // WithHostDependencyToDependOnHost tells the query-builder to eager-load the nodes that are connected to // the "HostDependencyToDependOnHost" edge. The optional arguments are used to configure the query builder of the edge. func (hdq *HostDependencyQuery) WithHostDependencyToDependOnHost(opts ...func(*HostQuery)) *HostDependencyQuery { - query := &HostQuery{config: hdq.config} + query := (&HostClient{config: hdq.config}).Query() for _, opt := range opts { opt(query) } @@ -364,7 +372,7 @@ func (hdq *HostDependencyQuery) WithHostDependencyToDependOnHost(opts ...func(*H // WithHostDependencyToDependByHost tells the query-builder to eager-load the nodes that are connected to // the "HostDependencyToDependByHost" edge. The optional arguments are used to configure the query builder of the edge. func (hdq *HostDependencyQuery) WithHostDependencyToDependByHost(opts ...func(*HostQuery)) *HostDependencyQuery { - query := &HostQuery{config: hdq.config} + query := (&HostClient{config: hdq.config}).Query() for _, opt := range opts { opt(query) } @@ -375,7 +383,7 @@ func (hdq *HostDependencyQuery) WithHostDependencyToDependByHost(opts ...func(*H // WithHostDependencyToNetwork tells the query-builder to eager-load the nodes that are connected to // the "HostDependencyToNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (hdq *HostDependencyQuery) WithHostDependencyToNetwork(opts ...func(*NetworkQuery)) *HostDependencyQuery { - query := &NetworkQuery{config: hdq.config} + query := (&NetworkClient{config: hdq.config}).Query() for _, opt := range opts { opt(query) } @@ -386,7 +394,7 @@ func (hdq *HostDependencyQuery) WithHostDependencyToNetwork(opts ...func(*Networ // WithHostDependencyToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "HostDependencyToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (hdq *HostDependencyQuery) WithHostDependencyToEnvironment(opts ...func(*EnvironmentQuery)) *HostDependencyQuery { - query := &EnvironmentQuery{config: hdq.config} + query := (&EnvironmentClient{config: hdq.config}).Query() for _, opt := range opts { opt(query) } @@ -408,17 +416,13 @@ func (hdq *HostDependencyQuery) WithHostDependencyToEnvironment(opts ...func(*En // GroupBy(hostdependency.FieldHostID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (hdq *HostDependencyQuery) GroupBy(field string, fields ...string) *HostDependencyGroupBy { - group := &HostDependencyGroupBy{config: hdq.config} - group.fields = append([]string{field}, fields...) 
- group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := hdq.prepareQuery(ctx); err != nil { - return nil, err - } - return hdq.sqlQuery(ctx), nil - } - return group + hdq.ctx.Fields = append([]string{field}, fields...) + grbuild := &HostDependencyGroupBy{build: hdq} + grbuild.flds = &hdq.ctx.Fields + grbuild.label = hostdependency.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -433,14 +437,31 @@ func (hdq *HostDependencyQuery) GroupBy(field string, fields ...string) *HostDep // client.HostDependency.Query(). // Select(hostdependency.FieldHostID). // Scan(ctx, &v) -// func (hdq *HostDependencyQuery) Select(fields ...string) *HostDependencySelect { - hdq.fields = append(hdq.fields, fields...) - return &HostDependencySelect{HostDependencyQuery: hdq} + hdq.ctx.Fields = append(hdq.ctx.Fields, fields...) + sbuild := &HostDependencySelect{HostDependencyQuery: hdq} + sbuild.label = hostdependency.Label + sbuild.flds, sbuild.scan = &hdq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a HostDependencySelect configured with the given aggregations. +func (hdq *HostDependencyQuery) Aggregate(fns ...AggregateFunc) *HostDependencySelect { + return hdq.Select().Aggregate(fns...) } func (hdq *HostDependencyQuery) prepareQuery(ctx context.Context) error { - for _, f := range hdq.fields { + for _, inter := range hdq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, hdq); err != nil { + return err + } + } + } + for _, f := range hdq.ctx.Fields { if !hostdependency.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -455,7 +476,7 @@ func (hdq *HostDependencyQuery) prepareQuery(ctx context.Context) error { return nil } -func (hdq *HostDependencyQuery) sqlAll(ctx context.Context) ([]*HostDependency, error) { +func (hdq *HostDependencyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*HostDependency, error) { var ( nodes = []*HostDependency{} withFKs = hdq.withFKs @@ -473,179 +494,209 @@ func (hdq *HostDependencyQuery) sqlAll(ctx context.Context) ([]*HostDependency, if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, hostdependency.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*HostDependency).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &HostDependency{config: hdq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(hdq.modifiers) > 0 { + _spec.Modifiers = hdq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, hdq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := hdq.withHostDependencyToDependOnHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*HostDependency) - for i := range nodes { - if nodes[i].host_dependency_host_dependency_to_depend_on_host == nil { - continue - } - fk := *nodes[i].host_dependency_host_dependency_to_depend_on_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := hdq.loadHostDependencyToDependOnHost(ctx, query, nodes, nil, + func(n *HostDependency, e *Host) { n.Edges.HostDependencyToDependOnHost = e }); err != nil { + return nil, err } - query.Where(host.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := hdq.withHostDependencyToDependByHost; query != nil { + if err := hdq.loadHostDependencyToDependByHost(ctx, query, nodes, nil, + func(n *HostDependency, e *Host) { n.Edges.HostDependencyToDependByHost = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_depend_on_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.HostDependencyToDependOnHost = n - } + } + if query := hdq.withHostDependencyToNetwork; query != nil { + if err := hdq.loadHostDependencyToNetwork(ctx, query, nodes, nil, + func(n *HostDependency, e *Network) { n.Edges.HostDependencyToNetwork = e }); err != nil { + return nil, err + } + } + if query := hdq.withHostDependencyToEnvironment; query != nil { + if err := hdq.loadHostDependencyToEnvironment(ctx, query, nodes, nil, + func(n *HostDependency, e *Environment) { n.Edges.HostDependencyToEnvironment = e }); err != nil { + return nil, err } } + for i := range hdq.loadTotal { + if err := hdq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := hdq.withHostDependencyToDependByHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*HostDependency) +func (hdq *HostDependencyQuery) loadHostDependencyToDependOnHost(ctx context.Context, query *HostQuery, nodes []*HostDependency, init func(*HostDependency), assign func(*HostDependency, *Host)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*HostDependency) + for i := range nodes { + if nodes[i].host_dependency_host_dependency_to_depend_on_host == nil { + continue + } + fk := *nodes[i].host_dependency_host_dependency_to_depend_on_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 
0 { + return nil + } + query.Where(host.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_depend_on_host" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].host_dependency_host_dependency_to_depend_by_host == nil { - continue - } - fk := *nodes[i].host_dependency_host_dependency_to_depend_by_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(host.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (hdq *HostDependencyQuery) loadHostDependencyToDependByHost(ctx context.Context, query *HostQuery, nodes []*HostDependency, init func(*HostDependency), assign func(*HostDependency, *Host)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*HostDependency) + for i := range nodes { + if nodes[i].host_dependency_host_dependency_to_depend_by_host == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_depend_by_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.HostDependencyToDependByHost = n - } + fk := *nodes[i].host_dependency_host_dependency_to_depend_by_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := hdq.withHostDependencyToNetwork; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*HostDependency) + if len(ids) == 0 { + return nil + } + query.Where(host.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_depend_by_host" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].host_dependency_host_dependency_to_network == nil { - continue - } - fk := *nodes[i].host_dependency_host_dependency_to_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(network.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (hdq *HostDependencyQuery) loadHostDependencyToNetwork(ctx context.Context, query *NetworkQuery, nodes []*HostDependency, init func(*HostDependency), assign func(*HostDependency, *Network)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*HostDependency) + for i := range nodes { + if nodes[i].host_dependency_host_dependency_to_network == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.HostDependencyToNetwork = n - } + fk := *nodes[i].host_dependency_host_dependency_to_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := hdq.withHostDependencyToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*HostDependency) + if len(ids) == 0 { + return nil + } + 
query.Where(network.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_network" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].environment_environment_to_host_dependency == nil { - continue - } - fk := *nodes[i].environment_environment_to_host_dependency - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (hdq *HostDependencyQuery) loadHostDependencyToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*HostDependency, init func(*HostDependency), assign func(*HostDependency, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*HostDependency) + for i := range nodes { + if nodes[i].environment_environment_to_host_dependency == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_host_dependency" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.HostDependencyToEnvironment = n - } + fk := *nodes[i].environment_environment_to_host_dependency + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - return nodes, nil + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_host_dependency" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (hdq *HostDependencyQuery) sqlCount(ctx context.Context) (int, error) { _spec := hdq.querySpec() - _spec.Node.Columns = hdq.fields - if len(hdq.fields) > 0 { - _spec.Unique = hdq.unique != nil && *hdq.unique + if len(hdq.modifiers) > 0 { + _spec.Modifiers = hdq.modifiers } - return sqlgraph.CountNodes(ctx, hdq.driver, _spec) -} - -func (hdq *HostDependencyQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := hdq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = hdq.ctx.Fields + if len(hdq.ctx.Fields) > 0 { + _spec.Unique = hdq.ctx.Unique != nil && *hdq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, hdq.driver, _spec) } func (hdq *HostDependencyQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: hostdependency.Table, - Columns: hostdependency.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, - }, - From: hdq.sql, - Unique: true, - } - if unique := hdq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(hostdependency.Table, hostdependency.Columns, sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID)) + _spec.From = hdq.sql + if unique := hdq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if hdq.path != nil { + _spec.Unique = true } - if fields := hdq.fields; len(fields) > 0 { + if fields := hdq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = 
append(_spec.Node.Columns, hostdependency.FieldID) for i := range fields { @@ -661,10 +712,10 @@ func (hdq *HostDependencyQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := hdq.limit; limit != nil { + if limit := hdq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := hdq.offset; offset != nil { + if offset := hdq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := hdq.order; len(ps) > 0 { @@ -680,7 +731,7 @@ func (hdq *HostDependencyQuery) querySpec() *sqlgraph.QuerySpec { func (hdq *HostDependencyQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(hdq.driver.Dialect()) t1 := builder.Table(hostdependency.Table) - columns := hdq.fields + columns := hdq.ctx.Fields if len(columns) == 0 { columns = hostdependency.Columns } @@ -689,7 +740,7 @@ func (hdq *HostDependencyQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = hdq.sql selector.Select(selector.Columns(columns...)...) } - if hdq.unique != nil && *hdq.unique { + if hdq.ctx.Unique != nil && *hdq.ctx.Unique { selector.Distinct() } for _, p := range hdq.predicates { @@ -698,12 +749,12 @@ func (hdq *HostDependencyQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range hdq.order { p(selector) } - if offset := hdq.offset; offset != nil { + if offset := hdq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := hdq.limit; limit != nil { + if limit := hdq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -711,12 +762,8 @@ func (hdq *HostDependencyQuery) sqlQuery(ctx context.Context) *sql.Selector { // HostDependencyGroupBy is the group-by builder for HostDependency entities. type HostDependencyGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *HostDependencyQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -725,471 +772,77 @@ func (hdgb *HostDependencyGroupBy) Aggregate(fns ...AggregateFunc) *HostDependen return hdgb } -// Scan applies the group-by query and scans the result into the given value. -func (hdgb *HostDependencyGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := hdgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (hdgb *HostDependencyGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, hdgb.build.ctx, "GroupBy") + if err := hdgb.build.prepareQuery(ctx); err != nil { return err } - hdgb.sql = query - return hdgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := hdgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(hdgb.fields) > 1 { - return nil, errors.New("ent: HostDependencyGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := hdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (hdgb *HostDependencyGroupBy) StringsX(ctx context.Context) []string { - v, err := hdgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = hdgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencyGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) StringX(ctx context.Context) string { - v, err := hdgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(hdgb.fields) > 1 { - return nil, errors.New("ent: HostDependencyGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := hdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) IntsX(ctx context.Context) []int { - v, err := hdgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = hdgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencyGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*HostDependencyQuery, *HostDependencyGroupBy](ctx, hdgb.build, hdgb, hdgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) IntX(ctx context.Context) int { - v, err := hdgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(hdgb.fields) > 1 { - return nil, errors.New("ent: HostDependencyGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := hdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := hdgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (hdgb *HostDependencyGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = hdgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencyGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) Float64X(ctx context.Context) float64 { - v, err := hdgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(hdgb.fields) > 1 { - return nil, errors.New("ent: HostDependencyGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := hdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) BoolsX(ctx context.Context) []bool { - v, err := hdgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (hdgb *HostDependencyGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = hdgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencyGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (hdgb *HostDependencyGroupBy) BoolX(ctx context.Context) bool { - v, err := hdgb.Bool(ctx) - if err != nil { - panic(err) +func (hdgb *HostDependencyGroupBy) sqlScan(ctx context.Context, root *HostDependencyQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(hdgb.fns)) + for _, fn := range hdgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (hdgb *HostDependencyGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range hdgb.fields { - if !hostdependency.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*hdgb.flds)+len(hdgb.fns)) + for _, f := range *hdgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := hdgb.sqlQuery() + selector.GroupBy(selector.Columns(*hdgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := hdgb.driver.Query(ctx, query, args, rows); err != nil { + if err := hdgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (hdgb *HostDependencyGroupBy) sqlQuery() *sql.Selector { - selector := hdgb.sql.Select() - aggregation := make([]string, 0, len(hdgb.fns)) - for _, fn := range hdgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(hdgb.fields)+len(hdgb.fns)) - for _, f := range hdgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(hdgb.fields...)...) -} - // HostDependencySelect is the builder for selecting fields of HostDependency entities. type HostDependencySelect struct { *HostDependencyQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (hds *HostDependencySelect) Aggregate(fns ...AggregateFunc) *HostDependencySelect { + hds.fns = append(hds.fns, fns...) + return hds } // Scan applies the selector query and scans the result into the given value. -func (hds *HostDependencySelect) Scan(ctx context.Context, v interface{}) error { +func (hds *HostDependencySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, hds.ctx, "Select") if err := hds.prepareQuery(ctx); err != nil { return err } - hds.sql = hds.HostDependencyQuery.sqlQuery(ctx) - return hds.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (hds *HostDependencySelect) ScanX(ctx context.Context, v interface{}) { - if err := hds.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) Strings(ctx context.Context) ([]string, error) { - if len(hds.fields) > 1 { - return nil, errors.New("ent: HostDependencySelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := hds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*HostDependencyQuery, *HostDependencySelect](ctx, hds.HostDependencyQuery, hds, hds.inters, v) } -// StringsX is like Strings, but panics if an error occurs. -func (hds *HostDependencySelect) StringsX(ctx context.Context) []string { - v, err := hds.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = hds.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencySelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. 
-func (hds *HostDependencySelect) StringX(ctx context.Context) string { - v, err := hds.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) Ints(ctx context.Context) ([]int, error) { - if len(hds.fields) > 1 { - return nil, errors.New("ent: HostDependencySelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := hds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (hds *HostDependencySelect) IntsX(ctx context.Context) []int { - v, err := hds.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = hds.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencySelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (hds *HostDependencySelect) IntX(ctx context.Context) int { - v, err := hds.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) Float64s(ctx context.Context) ([]float64, error) { - if len(hds.fields) > 1 { - return nil, errors.New("ent: HostDependencySelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := hds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (hds *HostDependencySelect) Float64sX(ctx context.Context) []float64 { - v, err := hds.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = hds.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencySelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (hds *HostDependencySelect) Float64X(ctx context.Context) float64 { - v, err := hds.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (hds *HostDependencySelect) Bools(ctx context.Context) ([]bool, error) { - if len(hds.fields) > 1 { - return nil, errors.New("ent: HostDependencySelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := hds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (hds *HostDependencySelect) BoolsX(ctx context.Context) []bool { - v, err := hds.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (hds *HostDependencySelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = hds.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{hostdependency.Label} - default: - err = fmt.Errorf("ent: HostDependencySelect.Bools returned %d results when one was expected", len(v)) +func (hds *HostDependencySelect) sqlScan(ctx context.Context, root *HostDependencyQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(hds.fns)) + for _, fn := range hds.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (hds *HostDependencySelect) BoolX(ctx context.Context) bool { - v, err := hds.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*hds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (hds *HostDependencySelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := hds.sql.Query() + query, args := selector.Query() if err := hds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/hostdependency_update.go b/ent/hostdependency_update.go index 568241db..a1758450 100755 --- a/ent/hostdependency_update.go +++ b/ent/hostdependency_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -37,12 +37,28 @@ func (hdu *HostDependencyUpdate) SetHostID(s string) *HostDependencyUpdate { return hdu } +// SetNillableHostID sets the "host_id" field if the given value is not nil. +func (hdu *HostDependencyUpdate) SetNillableHostID(s *string) *HostDependencyUpdate { + if s != nil { + hdu.SetHostID(*s) + } + return hdu +} + // SetNetworkID sets the "network_id" field. func (hdu *HostDependencyUpdate) SetNetworkID(s string) *HostDependencyUpdate { hdu.mutation.SetNetworkID(s) return hdu } +// SetNillableNetworkID sets the "network_id" field if the given value is not nil. +func (hdu *HostDependencyUpdate) SetNillableNetworkID(s *string) *HostDependencyUpdate { + if s != nil { + hdu.SetNetworkID(*s) + } + return hdu +} + // SetHostDependencyToDependOnHostID sets the "HostDependencyToDependOnHost" edge to the Host entity by ID. func (hdu *HostDependencyUpdate) SetHostDependencyToDependOnHostID(id uuid.UUID) *HostDependencyUpdate { hdu.mutation.SetHostDependencyToDependOnHostID(id) @@ -150,34 +166,7 @@ func (hdu *HostDependencyUpdate) ClearHostDependencyToEnvironment() *HostDepende // Save executes the query and returns the number of nodes affected by the update operation. 
func (hdu *HostDependencyUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(hdu.hooks) == 0 { - affected, err = hdu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostDependencyMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - hdu.mutation = mutation - affected, err = hdu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(hdu.hooks) - 1; i >= 0; i-- { - if hdu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hdu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hdu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, hdu.sqlSave, hdu.mutation, hdu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -203,16 +192,7 @@ func (hdu *HostDependencyUpdate) ExecX(ctx context.Context) { } func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: hostdependency.Table, - Columns: hostdependency.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(hostdependency.Table, hostdependency.Columns, sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID)) if ps := hdu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -221,18 +201,10 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) } } if value, ok := hdu.mutation.HostID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: hostdependency.FieldHostID, - }) + _spec.SetField(hostdependency.FieldHostID, field.TypeString, value) } if value, ok := hdu.mutation.NetworkID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: hostdependency.FieldNetworkID, - }) + _spec.SetField(hostdependency.FieldNetworkID, field.TypeString, value) } if hdu.mutation.HostDependencyToDependOnHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -242,10 +214,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToDependOnHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -258,10 +227,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToDependOnHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -277,10 +243,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToDependByHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -293,10 +256,7 
@@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToDependByHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -312,10 +272,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -328,10 +285,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -347,10 +301,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -363,10 +314,7 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) Columns: []string{hostdependency.HostDependencyToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -378,10 +326,11 @@ func (hdu *HostDependencyUpdate) sqlSave(ctx context.Context) (n int, err error) if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{hostdependency.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + hdu.mutation.done = true return n, nil } @@ -399,12 +348,28 @@ func (hduo *HostDependencyUpdateOne) SetHostID(s string) *HostDependencyUpdateOn return hduo } +// SetNillableHostID sets the "host_id" field if the given value is not nil. +func (hduo *HostDependencyUpdateOne) SetNillableHostID(s *string) *HostDependencyUpdateOne { + if s != nil { + hduo.SetHostID(*s) + } + return hduo +} + // SetNetworkID sets the "network_id" field. func (hduo *HostDependencyUpdateOne) SetNetworkID(s string) *HostDependencyUpdateOne { hduo.mutation.SetNetworkID(s) return hduo } +// SetNillableNetworkID sets the "network_id" field if the given value is not nil. +func (hduo *HostDependencyUpdateOne) SetNillableNetworkID(s *string) *HostDependencyUpdateOne { + if s != nil { + hduo.SetNetworkID(*s) + } + return hduo +} + // SetHostDependencyToDependOnHostID sets the "HostDependencyToDependOnHost" edge to the Host entity by ID. 
func (hduo *HostDependencyUpdateOne) SetHostDependencyToDependOnHostID(id uuid.UUID) *HostDependencyUpdateOne { hduo.mutation.SetHostDependencyToDependOnHostID(id) @@ -510,6 +475,12 @@ func (hduo *HostDependencyUpdateOne) ClearHostDependencyToEnvironment() *HostDep return hduo } +// Where appends a list predicates to the HostDependencyUpdate builder. +func (hduo *HostDependencyUpdateOne) Where(ps ...predicate.HostDependency) *HostDependencyUpdateOne { + hduo.mutation.Where(ps...) + return hduo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (hduo *HostDependencyUpdateOne) Select(field string, fields ...string) *HostDependencyUpdateOne { @@ -519,34 +490,7 @@ func (hduo *HostDependencyUpdateOne) Select(field string, fields ...string) *Hos // Save executes the query and returns the updated HostDependency entity. func (hduo *HostDependencyUpdateOne) Save(ctx context.Context) (*HostDependency, error) { - var ( - err error - node *HostDependency - ) - if len(hduo.hooks) == 0 { - node, err = hduo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*HostDependencyMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - hduo.mutation = mutation - node, err = hduo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(hduo.hooks) - 1; i >= 0; i-- { - if hduo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hduo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, hduo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, hduo.sqlSave, hduo.mutation, hduo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -572,16 +516,7 @@ func (hduo *HostDependencyUpdateOne) ExecX(ctx context.Context) { } func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDependency, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: hostdependency.Table, - Columns: hostdependency.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(hostdependency.Table, hostdependency.Columns, sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID)) id, ok := hduo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "HostDependency.id" for update`)} @@ -607,18 +542,10 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe } } if value, ok := hduo.mutation.HostID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: hostdependency.FieldHostID, - }) + _spec.SetField(hostdependency.FieldHostID, field.TypeString, value) } if value, ok := hduo.mutation.NetworkID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: hostdependency.FieldNetworkID, - }) + _spec.SetField(hostdependency.FieldNetworkID, field.TypeString, value) } if hduo.mutation.HostDependencyToDependOnHostCleared() { edge := &sqlgraph.EdgeSpec{ @@ -628,10 +555,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToDependOnHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -644,10 +568,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToDependOnHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -663,10 +584,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToDependByHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -679,10 +597,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToDependByHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -698,10 +613,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -714,10 +626,7 @@ func (hduo *HostDependencyUpdateOne) 
sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -733,10 +642,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -749,10 +655,7 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe Columns: []string{hostdependency.HostDependencyToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -767,9 +670,10 @@ func (hduo *HostDependencyUpdateOne) sqlSave(ctx context.Context) (_node *HostDe if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{hostdependency.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + hduo.mutation.done = true return _node, nil } diff --git a/ent/identity.go b/ent/identity.go index 7bf66c1e..3e6698b9 100755 --- a/ent/identity.go +++ b/ent/identity.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/identity" @@ -18,8 +19,8 @@ type Identity struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // FirstName holds the value of the "first_name" field. FirstName string `json:"first_name,omitempty" hcl:"firstname,attr"` // LastName holds the value of the "last_name" field. @@ -40,11 +41,13 @@ type Identity struct { // The values are being populated by the IdentityQuery when eager-loading is set. Edges IdentityEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // IdentityToEnvironment holds the value of the IdentityToEnvironment edge. HCLIdentityToEnvironment *Environment `json:"IdentityToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_identity *uuid.UUID + selectValues sql.SelectValues } // IdentityEdges holds the relations/edges for other nodes in the graph. @@ -54,6 +57,8 @@ type IdentityEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. 
+ totalCount [1]map[string]int } // IdentityToEnvironmentOrErr returns the IdentityToEnvironment value or an error if the edge @@ -61,8 +66,7 @@ type IdentityEdges struct { func (e IdentityEdges) IdentityToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[0] { if e.IdentityToEnvironment == nil { - // The edge IdentityToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.IdentityToEnvironment, nil @@ -71,20 +75,20 @@ func (e IdentityEdges) IdentityToEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Identity) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Identity) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case identity.FieldVars, identity.FieldTags: values[i] = new([]byte) - case identity.FieldHclID, identity.FieldFirstName, identity.FieldLastName, identity.FieldEmail, identity.FieldPassword, identity.FieldDescription, identity.FieldAvatarFile: + case identity.FieldHCLID, identity.FieldFirstName, identity.FieldLastName, identity.FieldEmail, identity.FieldPassword, identity.FieldDescription, identity.FieldAvatarFile: values[i] = new(sql.NullString) case identity.FieldID: values[i] = new(uuid.UUID) case identity.ForeignKeys[0]: // environment_environment_to_identity values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Identity", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -92,7 +96,7 @@ func (*Identity) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Identity fields. -func (i *Identity) assignValues(columns []string, values []interface{}) error { +func (i *Identity) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -104,11 +108,11 @@ func (i *Identity) assignValues(columns []string, values []interface{}) error { } else if value != nil { i.ID = *value } - case identity.FieldHclID: + case identity.FieldHCLID: if value, ok := values[j].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[j]) } else if value.Valid { - i.HclID = value.String + i.HCLID = value.String } case identity.FieldFirstName: if value, ok := values[j].(*sql.NullString); !ok { @@ -169,31 +173,39 @@ func (i *Identity) assignValues(columns []string, values []interface{}) error { i.environment_environment_to_identity = new(uuid.UUID) *i.environment_environment_to_identity = *value.S.(*uuid.UUID) } + default: + i.selectValues.Set(columns[j], values[j]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Identity. +// This includes values selected through modifiers, order, etc. +func (i *Identity) Value(name string) (ent.Value, error) { + return i.selectValues.Get(name) +} + // QueryIdentityToEnvironment queries the "IdentityToEnvironment" edge of the Identity entity. 
func (i *Identity) QueryIdentityToEnvironment() *EnvironmentQuery { - return (&IdentityClient{config: i.config}).QueryIdentityToEnvironment(i) + return NewIdentityClient(i.config).QueryIdentityToEnvironment(i) } // Update returns a builder for updating this Identity. // Note that you need to call Identity.Unwrap() before calling this method if this Identity // was returned from a transaction, and the transaction was committed or rolled back. func (i *Identity) Update() *IdentityUpdateOne { - return (&IdentityClient{config: i.config}).UpdateOne(i) + return NewIdentityClient(i.config).UpdateOne(i) } // Unwrap unwraps the Identity entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (i *Identity) Unwrap() *Identity { - tx, ok := i.config.driver.(*txDriver) + _tx, ok := i.config.driver.(*txDriver) if !ok { panic("ent: Identity is not a transactional entity") } - i.config.driver = tx.drv + i.config.driver = _tx.drv return i } @@ -201,24 +213,32 @@ func (i *Identity) Unwrap() *Identity { func (i *Identity) String() string { var builder strings.Builder builder.WriteString("Identity(") - builder.WriteString(fmt.Sprintf("id=%v", i.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(i.HclID) - builder.WriteString(", first_name=") + builder.WriteString(fmt.Sprintf("id=%v, ", i.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(i.HCLID) + builder.WriteString(", ") + builder.WriteString("first_name=") builder.WriteString(i.FirstName) - builder.WriteString(", last_name=") + builder.WriteString(", ") + builder.WriteString("last_name=") builder.WriteString(i.LastName) - builder.WriteString(", email=") + builder.WriteString(", ") + builder.WriteString("email=") builder.WriteString(i.Email) - builder.WriteString(", password=") + builder.WriteString(", ") + builder.WriteString("password=") builder.WriteString(i.Password) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(i.Description) - builder.WriteString(", avatar_file=") + builder.WriteString(", ") + builder.WriteString("avatar_file=") builder.WriteString(i.AvatarFile) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", i.Vars)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", i.Tags)) builder.WriteByte(')') return builder.String() @@ -226,9 +246,3 @@ func (i *Identity) String() string { // Identities is a parsable slice of Identity. type Identities []*Identity - -func (i Identities) config(cfg config) { - for _i := range i { - i[_i].config = cfg - } -} diff --git a/ent/identity/identity.go b/ent/identity/identity.go index 4947f62a..d62f359a 100755 --- a/ent/identity/identity.go +++ b/ent/identity/identity.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package identity import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "identity" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. 
+ FieldHCLID = "hcl_id" // FieldFirstName holds the string denoting the first_name field in the database. FieldFirstName = "first_name" // FieldLastName holds the string denoting the last_name field in the database. @@ -45,7 +47,7 @@ const ( // Columns holds all SQL columns for identity fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldFirstName, FieldLastName, FieldEmail, @@ -81,3 +83,60 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Identity queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByFirstName orders the results by the first_name field. +func ByFirstName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFirstName, opts...).ToFunc() +} + +// ByLastName orders the results by the last_name field. +func ByLastName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastName, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByPassword orders the results by the password field. +func ByPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPassword, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByAvatarFile orders the results by the avatar_file field. +func ByAvatarFile(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAvatarFile, opts...).ToFunc() +} + +// ByIdentityToEnvironmentField orders the results by IdentityToEnvironment field. +func ByIdentityToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newIdentityToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newIdentityToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(IdentityToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, IdentityToEnvironmentTable, IdentityToEnvironmentColumn), + ) +} diff --git a/ent/identity/where.go b/ent/identity/where.go index 076575b5..cd21462d 100755 --- a/ent/identity/where.go +++ b/ent/identity/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package identity @@ -11,911 +11,537 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
func IDNEQ(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Identity(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Identity { + return predicate.Identity(sql.FieldEQ(FieldHCLID, v)) } // FirstName applies equality check predicate on the "first_name" field. It's identical to FirstNameEQ. func FirstName(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldFirstName, v)) } // LastName applies equality check predicate on the "last_name" field. It's identical to LastNameEQ. func LastName(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldLastName, v)) } // Email applies equality check predicate on the "email" field. It's identical to EmailEQ. 
func Email(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldEmail, v)) } // Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. func Password(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldPassword, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. func Description(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldDescription, v)) } // AvatarFile applies equality check predicate on the "avatar_file" field. It's identical to AvatarFileEQ. func AvatarFile(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldAvatarFile, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Identity { + return predicate.Identity(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Identity { + return predicate.Identity(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Identity { + return predicate.Identity(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Identity { + return predicate.Identity(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. 
+func HCLIDGT(v string) predicate.Identity { + return predicate.Identity(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Identity { + return predicate.Identity(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Identity { + return predicate.Identity(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Identity { + return predicate.Identity(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Identity { + return predicate.Identity(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Identity { + return predicate.Identity(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Identity { + return predicate.Identity(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Identity { + return predicate.Identity(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Identity { + return predicate.Identity(sql.FieldContainsFold(FieldHCLID, v)) } // FirstNameEQ applies the EQ predicate on the "first_name" field. 
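The rewritten predicates above are behavior-preserving: each HCLID* helper now delegates to the sql.Field* functions instead of building a selector closure by hand, so call sites only pick up the Hcl -> HCL rename. A usage sketch (imports: context, ent, identity, as in the earlier example):

// findCorpIdentities returns identities whose hcl_id starts with the given
// prefix and whose description contains "admin" (case-insensitive). Assumes
// an initialized *ent.Client named client.
func findCorpIdentities(ctx context.Context, client *ent.Client, prefix string) ([]*ent.Identity, error) {
	return client.Identity.Query().
		Where(
			identity.HCLIDHasPrefix(prefix),           // was identity.HclIDHasPrefix
			identity.DescriptionContainsFold("admin"), // same semantics, now via sql.FieldContainsFold
		).
		All(ctx)
}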
func FirstNameEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldFirstName, v)) } // FirstNameNEQ applies the NEQ predicate on the "first_name" field. func FirstNameNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldNEQ(FieldFirstName, v)) } // FirstNameIn applies the In predicate on the "first_name" field. func FirstNameIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldFirstName), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldFirstName, vs...)) } // FirstNameNotIn applies the NotIn predicate on the "first_name" field. func FirstNameNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldFirstName), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldFirstName, vs...)) } // FirstNameGT applies the GT predicate on the "first_name" field. func FirstNameGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldGT(FieldFirstName, v)) } // FirstNameGTE applies the GTE predicate on the "first_name" field. func FirstNameGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldGTE(FieldFirstName, v)) } // FirstNameLT applies the LT predicate on the "first_name" field. func FirstNameLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldLT(FieldFirstName, v)) } // FirstNameLTE applies the LTE predicate on the "first_name" field. func FirstNameLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldLTE(FieldFirstName, v)) } // FirstNameContains applies the Contains predicate on the "first_name" field. func FirstNameContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldContains(FieldFirstName, v)) } // FirstNameHasPrefix applies the HasPrefix predicate on the "first_name" field. func FirstNameHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldHasPrefix(FieldFirstName, v)) } // FirstNameHasSuffix applies the HasSuffix predicate on the "first_name" field. 
func FirstNameHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldHasSuffix(FieldFirstName, v)) } // FirstNameEqualFold applies the EqualFold predicate on the "first_name" field. func FirstNameEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldEqualFold(FieldFirstName, v)) } // FirstNameContainsFold applies the ContainsFold predicate on the "first_name" field. func FirstNameContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldFirstName), v)) - }) + return predicate.Identity(sql.FieldContainsFold(FieldFirstName, v)) } // LastNameEQ applies the EQ predicate on the "last_name" field. func LastNameEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldLastName, v)) } // LastNameNEQ applies the NEQ predicate on the "last_name" field. func LastNameNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldNEQ(FieldLastName, v)) } // LastNameIn applies the In predicate on the "last_name" field. func LastNameIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLastName), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldLastName, vs...)) } // LastNameNotIn applies the NotIn predicate on the "last_name" field. func LastNameNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLastName), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldLastName, vs...)) } // LastNameGT applies the GT predicate on the "last_name" field. func LastNameGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldGT(FieldLastName, v)) } // LastNameGTE applies the GTE predicate on the "last_name" field. func LastNameGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldGTE(FieldLastName, v)) } // LastNameLT applies the LT predicate on the "last_name" field. func LastNameLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldLT(FieldLastName, v)) } // LastNameLTE applies the LTE predicate on the "last_name" field. 
func LastNameLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldLTE(FieldLastName, v)) } // LastNameContains applies the Contains predicate on the "last_name" field. func LastNameContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldContains(FieldLastName, v)) } // LastNameHasPrefix applies the HasPrefix predicate on the "last_name" field. func LastNameHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldHasPrefix(FieldLastName, v)) } // LastNameHasSuffix applies the HasSuffix predicate on the "last_name" field. func LastNameHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldHasSuffix(FieldLastName, v)) } // LastNameEqualFold applies the EqualFold predicate on the "last_name" field. func LastNameEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldEqualFold(FieldLastName, v)) } // LastNameContainsFold applies the ContainsFold predicate on the "last_name" field. func LastNameContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldLastName), v)) - }) + return predicate.Identity(sql.FieldContainsFold(FieldLastName, v)) } // EmailEQ applies the EQ predicate on the "email" field. func EmailEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldEmail, v)) } // EmailNEQ applies the NEQ predicate on the "email" field. func EmailNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldNEQ(FieldEmail, v)) } // EmailIn applies the In predicate on the "email" field. func EmailIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEmail), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldEmail, vs...)) } // EmailNotIn applies the NotIn predicate on the "email" field. func EmailNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEmail), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldEmail, vs...)) } // EmailGT applies the GT predicate on the "email" field. 
func EmailGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldGT(FieldEmail, v)) } // EmailGTE applies the GTE predicate on the "email" field. func EmailGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldGTE(FieldEmail, v)) } // EmailLT applies the LT predicate on the "email" field. func EmailLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldLT(FieldEmail, v)) } // EmailLTE applies the LTE predicate on the "email" field. func EmailLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldLTE(FieldEmail, v)) } // EmailContains applies the Contains predicate on the "email" field. func EmailContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldContains(FieldEmail, v)) } // EmailHasPrefix applies the HasPrefix predicate on the "email" field. func EmailHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldHasPrefix(FieldEmail, v)) } // EmailHasSuffix applies the HasSuffix predicate on the "email" field. func EmailHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldHasSuffix(FieldEmail, v)) } // EmailEqualFold applies the EqualFold predicate on the "email" field. func EmailEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldEqualFold(FieldEmail, v)) } // EmailContainsFold applies the ContainsFold predicate on the "email" field. func EmailContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldEmail), v)) - }) + return predicate.Identity(sql.FieldContainsFold(FieldEmail, v)) } // PasswordEQ applies the EQ predicate on the "password" field. func PasswordEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldPassword, v)) } // PasswordNEQ applies the NEQ predicate on the "password" field. func PasswordNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldNEQ(FieldPassword, v)) } // PasswordIn applies the In predicate on the "password" field. func PasswordIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPassword), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldPassword, vs...)) } // PasswordNotIn applies the NotIn predicate on the "password" field. func PasswordNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPassword), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldPassword, vs...)) } // PasswordGT applies the GT predicate on the "password" field. func PasswordGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldGT(FieldPassword, v)) } // PasswordGTE applies the GTE predicate on the "password" field. func PasswordGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldGTE(FieldPassword, v)) } // PasswordLT applies the LT predicate on the "password" field. func PasswordLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldLT(FieldPassword, v)) } // PasswordLTE applies the LTE predicate on the "password" field. func PasswordLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldLTE(FieldPassword, v)) } // PasswordContains applies the Contains predicate on the "password" field. func PasswordContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldContains(FieldPassword, v)) } // PasswordHasPrefix applies the HasPrefix predicate on the "password" field. func PasswordHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldHasPrefix(FieldPassword, v)) } // PasswordHasSuffix applies the HasSuffix predicate on the "password" field. func PasswordHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldHasSuffix(FieldPassword, v)) } // PasswordEqualFold applies the EqualFold predicate on the "password" field. func PasswordEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldEqualFold(FieldPassword, v)) } // PasswordContainsFold applies the ContainsFold predicate on the "password" field. func PasswordContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPassword), v)) - }) + return predicate.Identity(sql.FieldContainsFold(FieldPassword, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. 
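The removed inline handling for empty IN lists (appending a FALSE predicate so "IN ()" is never emitted) is not lost; the sql.FieldIn / sql.FieldNotIn helpers are expected to encapsulate the same behavior. A sketch of a caller that may pass an empty slice (imports as in the earlier example):

// identitiesByEmails returns the identities whose email is in the given list.
// An empty slice should yield no results, matching the behavior of the old
// closure-based predicate.
func identitiesByEmails(ctx context.Context, client *ent.Client, emails []string) ([]*ent.Identity, error) {
	return client.Identity.Query().
		Where(identity.EmailIn(emails...)).
		All(ctx)
}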
func DescriptionEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. func DescriptionIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. func DescriptionNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. 
func DescriptionHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. func DescriptionEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. func DescriptionContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Identity(sql.FieldContainsFold(FieldDescription, v)) } // AvatarFileEQ applies the EQ predicate on the "avatar_file" field. func AvatarFileEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldEQ(FieldAvatarFile, v)) } // AvatarFileNEQ applies the NEQ predicate on the "avatar_file" field. func AvatarFileNEQ(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldNEQ(FieldAvatarFile, v)) } // AvatarFileIn applies the In predicate on the "avatar_file" field. func AvatarFileIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldAvatarFile), v...)) - }) + return predicate.Identity(sql.FieldIn(FieldAvatarFile, vs...)) } // AvatarFileNotIn applies the NotIn predicate on the "avatar_file" field. func AvatarFileNotIn(vs ...string) predicate.Identity { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Identity(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldAvatarFile), v...)) - }) + return predicate.Identity(sql.FieldNotIn(FieldAvatarFile, vs...)) } // AvatarFileGT applies the GT predicate on the "avatar_file" field. func AvatarFileGT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldGT(FieldAvatarFile, v)) } // AvatarFileGTE applies the GTE predicate on the "avatar_file" field. func AvatarFileGTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldGTE(FieldAvatarFile, v)) } // AvatarFileLT applies the LT predicate on the "avatar_file" field. func AvatarFileLT(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldLT(FieldAvatarFile, v)) } // AvatarFileLTE applies the LTE predicate on the "avatar_file" field. 
func AvatarFileLTE(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldLTE(FieldAvatarFile, v)) } // AvatarFileContains applies the Contains predicate on the "avatar_file" field. func AvatarFileContains(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldContains(FieldAvatarFile, v)) } // AvatarFileHasPrefix applies the HasPrefix predicate on the "avatar_file" field. func AvatarFileHasPrefix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldHasPrefix(FieldAvatarFile, v)) } // AvatarFileHasSuffix applies the HasSuffix predicate on the "avatar_file" field. func AvatarFileHasSuffix(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldHasSuffix(FieldAvatarFile, v)) } // AvatarFileEqualFold applies the EqualFold predicate on the "avatar_file" field. func AvatarFileEqualFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldEqualFold(FieldAvatarFile, v)) } // AvatarFileContainsFold applies the ContainsFold predicate on the "avatar_file" field. func AvatarFileContainsFold(v string) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAvatarFile), v)) - }) + return predicate.Identity(sql.FieldContainsFold(FieldAvatarFile, v)) } // HasIdentityToEnvironment applies the HasEdge predicate on the "IdentityToEnvironment" edge. @@ -923,7 +549,6 @@ func HasIdentityToEnvironment() predicate.Identity { return predicate.Identity(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(IdentityToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, IdentityToEnvironmentTable, IdentityToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -933,11 +558,7 @@ func HasIdentityToEnvironment() predicate.Identity { // HasIdentityToEnvironmentWith applies the HasEdge predicate on the "IdentityToEnvironment" edge with a given conditions (other predicates). func HasIdentityToEnvironmentWith(preds ...predicate.Environment) predicate.Identity { return predicate.Identity(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(IdentityToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, IdentityToEnvironmentTable, IdentityToEnvironmentColumn), - ) + step := newIdentityToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -948,32 +569,15 @@ func HasIdentityToEnvironmentWith(preds ...predicate.Environment) predicate.Iden // And groups predicates with the AND operator between them. func And(predicates ...predicate.Identity) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Identity(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Identity) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Identity(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Identity) predicate.Identity { - return predicate.Identity(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Identity(sql.NotPredicates(p)) } diff --git a/ent/identity_create.go b/ent/identity_create.go index 52de7140..c4f757d3 100755 --- a/ent/identity_create.go +++ b/ent/identity_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -21,9 +21,9 @@ type IdentityCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (ic *IdentityCreate) SetHclID(s string) *IdentityCreate { - ic.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (ic *IdentityCreate) SetHCLID(s string) *IdentityCreate { + ic.mutation.SetHCLID(s) return ic } @@ -115,44 +115,8 @@ func (ic *IdentityCreate) Mutation() *IdentityMutation { // Save creates the Identity in the database. func (ic *IdentityCreate) Save(ctx context.Context) (*Identity, error) { - var ( - err error - node *Identity - ) ic.defaults() - if len(ic.hooks) == 0 { - if err = ic.check(); err != nil { - return nil, err - } - node, err = ic.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IdentityMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ic.check(); err != nil { - return nil, err - } - ic.mutation = mutation - if node, err = ic.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ic.hooks) - 1; i >= 0; i-- { - if ic.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ic.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ic.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, ic.sqlSave, ic.mutation, ic.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -187,7 +151,7 @@ func (ic *IdentityCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
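And/Or/Not now delegate to sql.AndPredicates, sql.OrPredicates, and sql.NotPredicates rather than cloning the selector manually; composition at call sites is unchanged. An illustrative sketch (imports as in the earlier example):

// corpOrExampleIdentities shows predicate composition with the simplified
// And/Or/Not helpers; semantics match the old closure-based implementations.
func corpOrExampleIdentities(ctx context.Context, client *ent.Client) ([]*ent.Identity, error) {
	return client.Identity.Query().
		Where(
			identity.Or(
				identity.HCLIDHasPrefix("corp-"),
				identity.And(
					identity.EmailHasSuffix("@example.com"),
					identity.Not(identity.AvatarFileEQ("")),
				),
			),
		).
		All(ctx)
}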
func (ic *IdentityCreate) check() error { - if _, ok := ic.mutation.HclID(); !ok { + if _, ok := ic.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Identity.hcl_id"`)} } if _, ok := ic.mutation.FirstName(); !ok { @@ -218,10 +182,13 @@ func (ic *IdentityCreate) check() error { } func (ic *IdentityCreate) sqlSave(ctx context.Context) (*Identity, error) { + if err := ic.check(); err != nil { + return nil, err + } _node, _spec := ic.createSpec() if err := sqlgraph.CreateNode(ctx, ic.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -232,94 +199,54 @@ func (ic *IdentityCreate) sqlSave(ctx context.Context) (*Identity, error) { return nil, err } } + ic.mutation.id = &_node.ID + ic.mutation.done = true return _node, nil } func (ic *IdentityCreate) createSpec() (*Identity, *sqlgraph.CreateSpec) { var ( _node = &Identity{config: ic.config} - _spec = &sqlgraph.CreateSpec{ - Table: identity.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(identity.Table, sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID)) ) if id, ok := ic.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := ic.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldHclID, - }) - _node.HclID = value + if value, ok := ic.mutation.HCLID(); ok { + _spec.SetField(identity.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := ic.mutation.FirstName(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldFirstName, - }) + _spec.SetField(identity.FieldFirstName, field.TypeString, value) _node.FirstName = value } if value, ok := ic.mutation.LastName(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldLastName, - }) + _spec.SetField(identity.FieldLastName, field.TypeString, value) _node.LastName = value } if value, ok := ic.mutation.Email(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldEmail, - }) + _spec.SetField(identity.FieldEmail, field.TypeString, value) _node.Email = value } if value, ok := ic.mutation.Password(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldPassword, - }) + _spec.SetField(identity.FieldPassword, field.TypeString, value) _node.Password = value } if value, ok := ic.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldDescription, - }) + _spec.SetField(identity.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := ic.mutation.AvatarFile(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldAvatarFile, - }) + _spec.SetField(identity.FieldAvatarFile, field.TypeString, value) _node.AvatarFile = value } if value, ok := ic.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: identity.FieldVars, - }) + 
_spec.SetField(identity.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := ic.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: identity.FieldTags, - }) + _spec.SetField(identity.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := ic.mutation.IdentityToEnvironmentIDs(); len(nodes) > 0 { @@ -330,10 +257,7 @@ func (ic *IdentityCreate) createSpec() (*Identity, *sqlgraph.CreateSpec) { Columns: []string{identity.IdentityToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -348,11 +272,15 @@ func (ic *IdentityCreate) createSpec() (*Identity, *sqlgraph.CreateSpec) { // IdentityCreateBulk is the builder for creating many Identity entities in bulk. type IdentityCreateBulk struct { config + err error builders []*IdentityCreate } // Save creates the Identity entities in the database. func (icb *IdentityCreateBulk) Save(ctx context.Context) ([]*Identity, error) { + if icb.err != nil { + return nil, icb.err + } specs := make([]*sqlgraph.CreateSpec, len(icb.builders)) nodes := make([]*Identity, len(icb.builders)) mutators := make([]Mutator, len(icb.builders)) @@ -369,8 +297,8 @@ func (icb *IdentityCreateBulk) Save(ctx context.Context) ([]*Identity, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, icb.builders[i+1].mutation) } else { @@ -378,7 +306,7 @@ func (icb *IdentityCreateBulk) Save(ctx context.Context) ([]*Identity, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, icb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/identity_delete.go b/ent/identity_delete.go index ea134ee1..2e5d5af8 100755 --- a/ent/identity_delete.go +++ b/ent/identity_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (id *IdentityDelete) Where(ps ...predicate.Identity) *IdentityDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (id *IdentityDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(id.hooks) == 0 { - affected, err = id.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IdentityMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - id.mutation = mutation - affected, err = id.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(id.hooks) - 1; i >= 0; i-- { - if id.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = id.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, id.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, id.sqlExec, id.mutation, id.hooks) } // ExecX is like Exec, but panics if an error occurs. 
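On the create side, only the setter name changes (SetHclID becomes SetHCLID); Save keeps its signature but is routed through the shared withHooks helper. A hedged sketch; the setters other than SetHCLID are assumed to be generated for the remaining fields, as in earlier generations (their hunks are elided above):

// newIdentity creates an Identity using the renamed SetHCLID setter.
func newIdentity(ctx context.Context, client *ent.Client) (*ent.Identity, error) {
	return client.Identity.Create().
		SetHCLID("corp-user-01"). // was SetHclID
		SetFirstName("Ada").
		SetLastName("Lovelace").
		SetEmail("ada@example.com").
		SetPassword("changeme").
		SetDescription("example identity").
		SetAvatarFile("ada.png").
		// ...plus any remaining required fields (e.g. vars, tags), set analogously.
		Save(ctx)
}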
@@ -68,15 +40,7 @@ func (id *IdentityDelete) ExecX(ctx context.Context) int { } func (id *IdentityDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: identity.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(identity.Table, sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID)) if ps := id.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (id *IdentityDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, id.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, id.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + id.mutation.done = true + return affected, err } // IdentityDeleteOne is the builder for deleting a single Identity entity. @@ -92,6 +61,12 @@ type IdentityDeleteOne struct { id *IdentityDelete } +// Where appends a list predicates to the IdentityDelete builder. +func (ido *IdentityDeleteOne) Where(ps ...predicate.Identity) *IdentityDeleteOne { + ido.id.mutation.Where(ps...) + return ido +} + // Exec executes the deletion query. func (ido *IdentityDeleteOne) Exec(ctx context.Context) error { n, err := ido.id.Exec(ctx) @@ -107,5 +82,7 @@ func (ido *IdentityDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ido *IdentityDeleteOne) ExecX(ctx context.Context) { - ido.id.ExecX(ctx) + if err := ido.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/identity_query.go b/ent/identity_query.go index ecbd4566..e69faace 100755 --- a/ent/identity_query.go +++ b/ent/identity_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // IdentityQuery is the builder for querying Identity entities. type IdentityQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Identity - // eager-loading edges. + ctx *QueryContext + order []identity.OrderOption + inters []Interceptor + predicates []predicate.Identity withIdentityToEnvironment *EnvironmentQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Identity) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (iq *IdentityQuery) Where(ps ...predicate.Identity) *IdentityQuery { return iq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (iq *IdentityQuery) Limit(limit int) *IdentityQuery { - iq.limit = &limit + iq.ctx.Limit = &limit return iq } -// Offset adds an offset step to the query. +// Offset to start from. func (iq *IdentityQuery) Offset(offset int) *IdentityQuery { - iq.offset = &offset + iq.ctx.Offset = &offset return iq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (iq *IdentityQuery) Unique(unique bool) *IdentityQuery { - iq.unique = &unique + iq.ctx.Unique = &unique return iq } -// Order adds an order step to the query. 
-func (iq *IdentityQuery) Order(o ...OrderFunc) *IdentityQuery { +// Order specifies how the records should be ordered. +func (iq *IdentityQuery) Order(o ...identity.OrderOption) *IdentityQuery { iq.order = append(iq.order, o...) return iq } // QueryIdentityToEnvironment chains the current query on the "IdentityToEnvironment" edge. func (iq *IdentityQuery) QueryIdentityToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: iq.config} + query := (&EnvironmentClient{config: iq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := iq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (iq *IdentityQuery) QueryIdentityToEnvironment() *EnvironmentQuery { // First returns the first Identity entity from the query. // Returns a *NotFoundError when no Identity was found. func (iq *IdentityQuery) First(ctx context.Context) (*Identity, error) { - nodes, err := iq.Limit(1).All(ctx) + nodes, err := iq.Limit(1).All(setContextOp(ctx, iq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (iq *IdentityQuery) FirstX(ctx context.Context) *Identity { // Returns a *NotFoundError when no Identity ID was found. func (iq *IdentityQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = iq.Limit(1).IDs(ctx); err != nil { + if ids, err = iq.Limit(1).IDs(setContextOp(ctx, iq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (iq *IdentityQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Identity entity is found. // Returns a *NotFoundError when no Identity entities are found. func (iq *IdentityQuery) Only(ctx context.Context) (*Identity, error) { - nodes, err := iq.Limit(2).All(ctx) + nodes, err := iq.Limit(2).All(setContextOp(ctx, iq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (iq *IdentityQuery) OnlyX(ctx context.Context) *Identity { // Returns a *NotFoundError when no entities are found. func (iq *IdentityQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = iq.Limit(2).IDs(ctx); err != nil { + if ids, err = iq.Limit(2).IDs(setContextOp(ctx, iq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (iq *IdentityQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Identities. func (iq *IdentityQuery) All(ctx context.Context) ([]*Identity, error) { + ctx = setContextOp(ctx, iq.ctx, "All") if err := iq.prepareQuery(ctx); err != nil { return nil, err } - return iq.sqlAll(ctx) + qr := querierAll[[]*Identity, *IdentityQuery]() + return withInterceptors[[]*Identity](ctx, iq, qr, iq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (iq *IdentityQuery) AllX(ctx context.Context) []*Identity { } // IDs executes the query and returns a list of Identity IDs. 
-func (iq *IdentityQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := iq.Select(identity.FieldID).Scan(ctx, &ids); err != nil { +func (iq *IdentityQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if iq.ctx.Unique == nil && iq.path != nil { + iq.Unique(true) + } + ctx = setContextOp(ctx, iq.ctx, "IDs") + if err = iq.Select(identity.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (iq *IdentityQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (iq *IdentityQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, iq.ctx, "Count") if err := iq.prepareQuery(ctx); err != nil { return 0, err } - return iq.sqlCount(ctx) + return withInterceptors[int](ctx, iq, querierCount[*IdentityQuery](), iq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (iq *IdentityQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (iq *IdentityQuery) Exist(ctx context.Context) (bool, error) { - if err := iq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, iq.ctx, "Exist") + switch _, err := iq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return iq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -264,22 +273,21 @@ func (iq *IdentityQuery) Clone() *IdentityQuery { } return &IdentityQuery{ config: iq.config, - limit: iq.limit, - offset: iq.offset, - order: append([]OrderFunc{}, iq.order...), + ctx: iq.ctx.Clone(), + order: append([]identity.OrderOption{}, iq.order...), + inters: append([]Interceptor{}, iq.inters...), predicates: append([]predicate.Identity{}, iq.predicates...), withIdentityToEnvironment: iq.withIdentityToEnvironment.Clone(), // clone intermediate query. - sql: iq.sql.Clone(), - path: iq.path, - unique: iq.unique, + sql: iq.sql.Clone(), + path: iq.path, } } // WithIdentityToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "IdentityToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (iq *IdentityQuery) WithIdentityToEnvironment(opts ...func(*EnvironmentQuery)) *IdentityQuery { - query := &EnvironmentQuery{config: iq.config} + query := (&EnvironmentClient{config: iq.config}).Query() for _, opt := range opts { opt(query) } @@ -293,25 +301,21 @@ func (iq *IdentityQuery) WithIdentityToEnvironment(opts ...func(*EnvironmentQuer // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Identity.Query(). -// GroupBy(identity.FieldHclID). +// GroupBy(identity.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (iq *IdentityQuery) GroupBy(field string, fields ...string) *IdentityGroupBy { - group := &IdentityGroupBy{config: iq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := iq.prepareQuery(ctx); err != nil { - return nil, err - } - return iq.sqlQuery(ctx), nil - } - return group + iq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &IdentityGroupBy{build: iq} + grbuild.flds = &iq.ctx.Fields + grbuild.label = identity.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -320,20 +324,37 @@ func (iq *IdentityQuery) GroupBy(field string, fields ...string) *IdentityGroupB // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Identity.Query(). -// Select(identity.FieldHclID). +// Select(identity.FieldHCLID). // Scan(ctx, &v) -// func (iq *IdentityQuery) Select(fields ...string) *IdentitySelect { - iq.fields = append(iq.fields, fields...) - return &IdentitySelect{IdentityQuery: iq} + iq.ctx.Fields = append(iq.ctx.Fields, fields...) + sbuild := &IdentitySelect{IdentityQuery: iq} + sbuild.label = identity.Label + sbuild.flds, sbuild.scan = &iq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a IdentitySelect configured with the given aggregations. +func (iq *IdentityQuery) Aggregate(fns ...AggregateFunc) *IdentitySelect { + return iq.Select().Aggregate(fns...) } func (iq *IdentityQuery) prepareQuery(ctx context.Context) error { - for _, f := range iq.fields { + for _, inter := range iq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, iq); err != nil { + return err + } + } + } + for _, f := range iq.ctx.Fields { if !identity.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (iq *IdentityQuery) prepareQuery(ctx context.Context) error { return nil } -func (iq *IdentityQuery) sqlAll(ctx context.Context) ([]*Identity, error) { +func (iq *IdentityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Identity, error) { var ( nodes = []*Identity{} withFKs = iq.withFKs @@ -363,92 +384,95 @@ func (iq *IdentityQuery) sqlAll(ctx context.Context) ([]*Identity, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, identity.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Identity).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Identity{config: iq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(iq.modifiers) > 0 { + _spec.Modifiers = iq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, iq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := iq.withIdentityToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Identity) - for i := range nodes { - if nodes[i].environment_environment_to_identity == nil { - continue - } - fk := *nodes[i].environment_environment_to_identity - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := iq.loadIdentityToEnvironment(ctx, query, nodes, nil, + func(n *Identity, e *Environment) { n.Edges.IdentityToEnvironment = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_identity" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.IdentityToEnvironment = n - } + } + for i := range iq.loadTotal { + if err := iq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (iq *IdentityQuery) sqlCount(ctx context.Context) (int, error) { - _spec := iq.querySpec() - _spec.Node.Columns = iq.fields - if len(iq.fields) > 0 { - _spec.Unique = iq.unique != nil && *iq.unique +func (iq *IdentityQuery) loadIdentityToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Identity, init func(*Identity), assign func(*Identity, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Identity) + for i := range nodes { + if nodes[i].environment_environment_to_identity == nil { + continue + } + fk := *nodes[i].environment_environment_to_identity + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, iq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_identity" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (iq *IdentityQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := iq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (iq *IdentityQuery) sqlCount(ctx context.Context) (int, error) { + _spec := iq.querySpec() + if len(iq.modifiers) > 0 { + _spec.Modifiers = iq.modifiers } - return n > 0, nil + _spec.Node.Columns = iq.ctx.Fields + if len(iq.ctx.Fields) > 0 { + 
_spec.Unique = iq.ctx.Unique != nil && *iq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, iq.driver, _spec) } func (iq *IdentityQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: identity.Table, - Columns: identity.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, - }, - From: iq.sql, - Unique: true, - } - if unique := iq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(identity.Table, identity.Columns, sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID)) + _spec.From = iq.sql + if unique := iq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if iq.path != nil { + _spec.Unique = true } - if fields := iq.fields; len(fields) > 0 { + if fields := iq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, identity.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (iq *IdentityQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := iq.limit; limit != nil { + if limit := iq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := iq.offset; offset != nil { + if offset := iq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := iq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (iq *IdentityQuery) querySpec() *sqlgraph.QuerySpec { func (iq *IdentityQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(iq.driver.Dialect()) t1 := builder.Table(identity.Table) - columns := iq.fields + columns := iq.ctx.Fields if len(columns) == 0 { columns = identity.Columns } @@ -492,7 +516,7 @@ func (iq *IdentityQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = iq.sql selector.Select(selector.Columns(columns...)...) } - if iq.unique != nil && *iq.unique { + if iq.ctx.Unique != nil && *iq.ctx.Unique { selector.Distinct() } for _, p := range iq.predicates { @@ -501,12 +525,12 @@ func (iq *IdentityQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range iq.order { p(selector) } - if offset := iq.offset; offset != nil { + if offset := iq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := iq.limit; limit != nil { + if limit := iq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (iq *IdentityQuery) sqlQuery(ctx context.Context) *sql.Selector { // IdentityGroupBy is the group-by builder for Identity entities. type IdentityGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *IdentityQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (igb *IdentityGroupBy) Aggregate(fns ...AggregateFunc) *IdentityGroupBy { return igb } -// Scan applies the group-by query and scans the result into the given value. -func (igb *IdentityGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := igb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. 
+func (igb *IdentityGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, igb.build.ctx, "GroupBy") + if err := igb.build.prepareQuery(ctx); err != nil { return err } - igb.sql = query - return igb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (igb *IdentityGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := igb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(igb.fields) > 1 { - return nil, errors.New("ent: IdentityGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := igb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (igb *IdentityGroupBy) StringsX(ctx context.Context) []string { - v, err := igb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = igb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentityGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (igb *IdentityGroupBy) StringX(ctx context.Context) string { - v, err := igb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(igb.fields) > 1 { - return nil, errors.New("ent: IdentityGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := igb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (igb *IdentityGroupBy) IntsX(ctx context.Context) []int { - v, err := igb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = igb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentityGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*IdentityQuery, *IdentityGroupBy](ctx, igb.build, igb, igb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (igb *IdentityGroupBy) IntX(ctx context.Context) int { - v, err := igb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (igb *IdentityGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(igb.fields) > 1 { - return nil, errors.New("ent: IdentityGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := igb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (igb *IdentityGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := igb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = igb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentityGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (igb *IdentityGroupBy) Float64X(ctx context.Context) float64 { - v, err := igb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(igb.fields) > 1 { - return nil, errors.New("ent: IdentityGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := igb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (igb *IdentityGroupBy) BoolsX(ctx context.Context) []bool { - v, err := igb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (igb *IdentityGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = igb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentityGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (igb *IdentityGroupBy) BoolX(ctx context.Context) bool { - v, err := igb.Bool(ctx) - if err != nil { - panic(err) +func (igb *IdentityGroupBy) sqlScan(ctx context.Context, root *IdentityQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(igb.fns)) + for _, fn := range igb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (igb *IdentityGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range igb.fields { - if !identity.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*igb.flds)+len(igb.fns)) + for _, f := range *igb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := igb.sqlQuery() + selector.GroupBy(selector.Columns(*igb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := igb.driver.Query(ctx, query, args, rows); err != nil { + if err := igb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (igb *IdentityGroupBy) sqlQuery() *sql.Selector { - selector := igb.sql.Select() - aggregation := make([]string, 0, len(igb.fns)) - for _, fn := range igb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(igb.fields)+len(igb.fns)) - for _, f := range igb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(igb.fields...)...) -} - // IdentitySelect is the builder for selecting fields of Identity entities. type IdentitySelect struct { *IdentityQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (is *IdentitySelect) Aggregate(fns ...AggregateFunc) *IdentitySelect { + is.fns = append(is.fns, fns...) + return is } // Scan applies the selector query and scans the result into the given value. -func (is *IdentitySelect) Scan(ctx context.Context, v interface{}) error { +func (is *IdentitySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, is.ctx, "Select") if err := is.prepareQuery(ctx); err != nil { return err } - is.sql = is.IdentityQuery.sqlQuery(ctx) - return is.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (is *IdentitySelect) ScanX(ctx context.Context, v interface{}) { - if err := is.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (is *IdentitySelect) Strings(ctx context.Context) ([]string, error) { - if len(is.fields) > 1 { - return nil, errors.New("ent: IdentitySelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := is.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (is *IdentitySelect) StringsX(ctx context.Context) []string { - v, err := is.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*IdentityQuery, *IdentitySelect](ctx, is.IdentityQuery, is, is.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (is *IdentitySelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = is.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentitySelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (is *IdentitySelect) StringX(ctx context.Context) string { - v, err := is.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
-func (is *IdentitySelect) Ints(ctx context.Context) ([]int, error) { - if len(is.fields) > 1 { - return nil, errors.New("ent: IdentitySelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := is.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (is *IdentitySelect) IntsX(ctx context.Context) []int { - v, err := is.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (is *IdentitySelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = is.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentitySelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (is *IdentitySelect) IntX(ctx context.Context) int { - v, err := is.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (is *IdentitySelect) Float64s(ctx context.Context) ([]float64, error) { - if len(is.fields) > 1 { - return nil, errors.New("ent: IdentitySelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := is.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (is *IdentitySelect) Float64sX(ctx context.Context) []float64 { - v, err := is.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (is *IdentitySelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = is.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentitySelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (is *IdentitySelect) Float64X(ctx context.Context) float64 { - v, err := is.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (is *IdentitySelect) Bools(ctx context.Context) ([]bool, error) { - if len(is.fields) > 1 { - return nil, errors.New("ent: IdentitySelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := is.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (is *IdentitySelect) BoolsX(ctx context.Context) []bool { - v, err := is.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
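The per-entity Strings/Ints/Float64s/Bools helpers removed in the surrounding hunks are not lost functionality: IdentityGroupBy and IdentitySelect now embed the shared selector base (see the selector/build fields earlier in this file), which in current ent provides the same typed scan helpers generically, so existing call sites should keep compiling. A sketch under that assumption, also showing the new Aggregate entry point:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/identity"
)

// identityEmails still uses Strings; the helper is now inherited from the
// shared selector base instead of being generated per entity.
func identityEmails(ctx context.Context, client *ent.Client) ([]string, error) {
	return client.Identity.Query().
		Select(identity.FieldEmail).
		Strings(ctx)
}

// countIdentities uses the new Aggregate shortcut; Int is assumed to come
// from the same shared selector base.
func countIdentities(ctx context.Context, client *ent.Client) (int, error) {
	return client.Identity.Query().
		Aggregate(ent.Count()).
		Int(ctx)
}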
-func (is *IdentitySelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = is.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{identity.Label} - default: - err = fmt.Errorf("ent: IdentitySelect.Bools returned %d results when one was expected", len(v)) +func (is *IdentitySelect) sqlScan(ctx context.Context, root *IdentityQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(is.fns)) + for _, fn := range is.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (is *IdentitySelect) BoolX(ctx context.Context) bool { - v, err := is.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*is.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (is *IdentitySelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := is.sql.Query() + query, args := selector.Query() if err := is.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/identity_update.go b/ent/identity_update.go index b5f3932b..278ab1f5 100755 --- a/ent/identity_update.go +++ b/ent/identity_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -29,9 +29,17 @@ func (iu *IdentityUpdate) Where(ps ...predicate.Identity) *IdentityUpdate { return iu } -// SetHclID sets the "hcl_id" field. -func (iu *IdentityUpdate) SetHclID(s string) *IdentityUpdate { - iu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (iu *IdentityUpdate) SetHCLID(s string) *IdentityUpdate { + iu.mutation.SetHCLID(s) + return iu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillableHCLID(s *string) *IdentityUpdate { + if s != nil { + iu.SetHCLID(*s) + } return iu } @@ -41,36 +49,84 @@ func (iu *IdentityUpdate) SetFirstName(s string) *IdentityUpdate { return iu } +// SetNillableFirstName sets the "first_name" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillableFirstName(s *string) *IdentityUpdate { + if s != nil { + iu.SetFirstName(*s) + } + return iu +} + // SetLastName sets the "last_name" field. func (iu *IdentityUpdate) SetLastName(s string) *IdentityUpdate { iu.mutation.SetLastName(s) return iu } +// SetNillableLastName sets the "last_name" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillableLastName(s *string) *IdentityUpdate { + if s != nil { + iu.SetLastName(*s) + } + return iu +} + // SetEmail sets the "email" field. func (iu *IdentityUpdate) SetEmail(s string) *IdentityUpdate { iu.mutation.SetEmail(s) return iu } +// SetNillableEmail sets the "email" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillableEmail(s *string) *IdentityUpdate { + if s != nil { + iu.SetEmail(*s) + } + return iu +} + // SetPassword sets the "password" field. func (iu *IdentityUpdate) SetPassword(s string) *IdentityUpdate { iu.mutation.SetPassword(s) return iu } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillablePassword(s *string) *IdentityUpdate { + if s != nil { + iu.SetPassword(*s) + } + return iu +} + // SetDescription sets the "description" field. 
func (iu *IdentityUpdate) SetDescription(s string) *IdentityUpdate { iu.mutation.SetDescription(s) return iu } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillableDescription(s *string) *IdentityUpdate { + if s != nil { + iu.SetDescription(*s) + } + return iu +} + // SetAvatarFile sets the "avatar_file" field. func (iu *IdentityUpdate) SetAvatarFile(s string) *IdentityUpdate { iu.mutation.SetAvatarFile(s) return iu } +// SetNillableAvatarFile sets the "avatar_file" field if the given value is not nil. +func (iu *IdentityUpdate) SetNillableAvatarFile(s *string) *IdentityUpdate { + if s != nil { + iu.SetAvatarFile(*s) + } + return iu +} + // SetVars sets the "vars" field. func (iu *IdentityUpdate) SetVars(m map[string]string) *IdentityUpdate { iu.mutation.SetVars(m) @@ -115,34 +171,7 @@ func (iu *IdentityUpdate) ClearIdentityToEnvironment() *IdentityUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (iu *IdentityUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(iu.hooks) == 0 { - affected, err = iu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IdentityMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - iu.mutation = mutation - affected, err = iu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(iu.hooks) - 1; i >= 0; i-- { - if iu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = iu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, iu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, iu.sqlSave, iu.mutation, iu.hooks) } // SaveX is like Save, but panics if an error occurs. 
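The SetNillable* setters added above make patch-style updates straightforward: nil pointers are skipped, so only the fields a caller actually provides end up in the UPDATE statement. A sketch, assuming client and a hypothetical input struct with optional fields:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/identity"
)

// UpdateIdentityInput is a hypothetical partial-update payload; nil fields
// mean "leave unchanged".
type UpdateIdentityInput struct {
	FirstName   *string
	LastName    *string
	Description *string
}

func patchIdentities(ctx context.Context, client *ent.Client, hclID string, in UpdateIdentityInput) (int, error) {
	return client.Identity.Update().
		Where(identity.HCLIDEQ(hclID)). // assumed predicate name after the HclID -> HCLID rename
		SetNillableFirstName(in.FirstName).
		SetNillableLastName(in.LastName).
		SetNillableDescription(in.Description).
		Save(ctx)
}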
@@ -168,16 +197,7 @@ func (iu *IdentityUpdate) ExecX(ctx context.Context) { } func (iu *IdentityUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: identity.Table, - Columns: identity.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(identity.Table, identity.Columns, sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID)) if ps := iu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -185,68 +205,32 @@ func (iu *IdentityUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := iu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldHclID, - }) + if value, ok := iu.mutation.HCLID(); ok { + _spec.SetField(identity.FieldHCLID, field.TypeString, value) } if value, ok := iu.mutation.FirstName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldFirstName, - }) + _spec.SetField(identity.FieldFirstName, field.TypeString, value) } if value, ok := iu.mutation.LastName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldLastName, - }) + _spec.SetField(identity.FieldLastName, field.TypeString, value) } if value, ok := iu.mutation.Email(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldEmail, - }) + _spec.SetField(identity.FieldEmail, field.TypeString, value) } if value, ok := iu.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldPassword, - }) + _spec.SetField(identity.FieldPassword, field.TypeString, value) } if value, ok := iu.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldDescription, - }) + _spec.SetField(identity.FieldDescription, field.TypeString, value) } if value, ok := iu.mutation.AvatarFile(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldAvatarFile, - }) + _spec.SetField(identity.FieldAvatarFile, field.TypeString, value) } if value, ok := iu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: identity.FieldVars, - }) + _spec.SetField(identity.FieldVars, field.TypeJSON, value) } if value, ok := iu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: identity.FieldTags, - }) + _spec.SetField(identity.FieldTags, field.TypeJSON, value) } if iu.mutation.IdentityToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -256,10 +240,7 @@ func (iu *IdentityUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{identity.IdentityToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, 
edge) @@ -272,10 +253,7 @@ func (iu *IdentityUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{identity.IdentityToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -287,10 +265,11 @@ func (iu *IdentityUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{identity.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + iu.mutation.done = true return n, nil } @@ -302,9 +281,17 @@ type IdentityUpdateOne struct { mutation *IdentityMutation } -// SetHclID sets the "hcl_id" field. -func (iuo *IdentityUpdateOne) SetHclID(s string) *IdentityUpdateOne { - iuo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (iuo *IdentityUpdateOne) SetHCLID(s string) *IdentityUpdateOne { + iuo.mutation.SetHCLID(s) + return iuo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (iuo *IdentityUpdateOne) SetNillableHCLID(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetHCLID(*s) + } return iuo } @@ -314,36 +301,84 @@ func (iuo *IdentityUpdateOne) SetFirstName(s string) *IdentityUpdateOne { return iuo } +// SetNillableFirstName sets the "first_name" field if the given value is not nil. +func (iuo *IdentityUpdateOne) SetNillableFirstName(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetFirstName(*s) + } + return iuo +} + // SetLastName sets the "last_name" field. func (iuo *IdentityUpdateOne) SetLastName(s string) *IdentityUpdateOne { iuo.mutation.SetLastName(s) return iuo } +// SetNillableLastName sets the "last_name" field if the given value is not nil. +func (iuo *IdentityUpdateOne) SetNillableLastName(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetLastName(*s) + } + return iuo +} + // SetEmail sets the "email" field. func (iuo *IdentityUpdateOne) SetEmail(s string) *IdentityUpdateOne { iuo.mutation.SetEmail(s) return iuo } +// SetNillableEmail sets the "email" field if the given value is not nil. +func (iuo *IdentityUpdateOne) SetNillableEmail(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetEmail(*s) + } + return iuo +} + // SetPassword sets the "password" field. func (iuo *IdentityUpdateOne) SetPassword(s string) *IdentityUpdateOne { iuo.mutation.SetPassword(s) return iuo } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (iuo *IdentityUpdateOne) SetNillablePassword(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetPassword(*s) + } + return iuo +} + // SetDescription sets the "description" field. func (iuo *IdentityUpdateOne) SetDescription(s string) *IdentityUpdateOne { iuo.mutation.SetDescription(s) return iuo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (iuo *IdentityUpdateOne) SetNillableDescription(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetDescription(*s) + } + return iuo +} + // SetAvatarFile sets the "avatar_file" field. func (iuo *IdentityUpdateOne) SetAvatarFile(s string) *IdentityUpdateOne { iuo.mutation.SetAvatarFile(s) return iuo } +// SetNillableAvatarFile sets the "avatar_file" field if the given value is not nil. 
+func (iuo *IdentityUpdateOne) SetNillableAvatarFile(s *string) *IdentityUpdateOne { + if s != nil { + iuo.SetAvatarFile(*s) + } + return iuo +} + // SetVars sets the "vars" field. func (iuo *IdentityUpdateOne) SetVars(m map[string]string) *IdentityUpdateOne { iuo.mutation.SetVars(m) @@ -386,6 +421,12 @@ func (iuo *IdentityUpdateOne) ClearIdentityToEnvironment() *IdentityUpdateOne { return iuo } +// Where appends a list predicates to the IdentityUpdate builder. +func (iuo *IdentityUpdateOne) Where(ps ...predicate.Identity) *IdentityUpdateOne { + iuo.mutation.Where(ps...) + return iuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (iuo *IdentityUpdateOne) Select(field string, fields ...string) *IdentityUpdateOne { @@ -395,34 +436,7 @@ func (iuo *IdentityUpdateOne) Select(field string, fields ...string) *IdentityUp // Save executes the query and returns the updated Identity entity. func (iuo *IdentityUpdateOne) Save(ctx context.Context) (*Identity, error) { - var ( - err error - node *Identity - ) - if len(iuo.hooks) == 0 { - node, err = iuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IdentityMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - iuo.mutation = mutation - node, err = iuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(iuo.hooks) - 1; i >= 0; i-- { - if iuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = iuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, iuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, iuo.sqlSave, iuo.mutation, iuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
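Where on IdentityUpdateOne (added above) attaches extra predicates to a single-row update; if the row no longer matches, Save returns a *NotFoundError instead of writing. That gives a cheap optimistic guard, sketched here with an assumed identity.PasswordEQ predicate:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/identity"
	"github.com/google/uuid"
)

// rotatePassword only applies the update while the stored hash still equals
// oldHash; a concurrent change surfaces as a not-found error.
func rotatePassword(ctx context.Context, client *ent.Client, id uuid.UUID, oldHash, newHash string) (*ent.Identity, error) {
	return client.Identity.UpdateOneID(id).
		Where(identity.PasswordEQ(oldHash)). // assumed predicate; not shown in this diff
		SetPassword(newHash).
		Save(ctx)
}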
@@ -448,16 +462,7 @@ func (iuo *IdentityUpdateOne) ExecX(ctx context.Context) { } func (iuo *IdentityUpdateOne) sqlSave(ctx context.Context) (_node *Identity, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: identity.Table, - Columns: identity.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: identity.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(identity.Table, identity.Columns, sqlgraph.NewFieldSpec(identity.FieldID, field.TypeUUID)) id, ok := iuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Identity.id" for update`)} @@ -482,68 +487,32 @@ func (iuo *IdentityUpdateOne) sqlSave(ctx context.Context) (_node *Identity, err } } } - if value, ok := iuo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldHclID, - }) + if value, ok := iuo.mutation.HCLID(); ok { + _spec.SetField(identity.FieldHCLID, field.TypeString, value) } if value, ok := iuo.mutation.FirstName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldFirstName, - }) + _spec.SetField(identity.FieldFirstName, field.TypeString, value) } if value, ok := iuo.mutation.LastName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldLastName, - }) + _spec.SetField(identity.FieldLastName, field.TypeString, value) } if value, ok := iuo.mutation.Email(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldEmail, - }) + _spec.SetField(identity.FieldEmail, field.TypeString, value) } if value, ok := iuo.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldPassword, - }) + _spec.SetField(identity.FieldPassword, field.TypeString, value) } if value, ok := iuo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldDescription, - }) + _spec.SetField(identity.FieldDescription, field.TypeString, value) } if value, ok := iuo.mutation.AvatarFile(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: identity.FieldAvatarFile, - }) + _spec.SetField(identity.FieldAvatarFile, field.TypeString, value) } if value, ok := iuo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: identity.FieldVars, - }) + _spec.SetField(identity.FieldVars, field.TypeJSON, value) } if value, ok := iuo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: identity.FieldTags, - }) + _spec.SetField(identity.FieldTags, field.TypeJSON, value) } if iuo.mutation.IdentityToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -553,10 +522,7 @@ func (iuo *IdentityUpdateOne) sqlSave(ctx context.Context) (_node *Identity, err Columns: []string{identity.IdentityToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, 
field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -569,10 +535,7 @@ func (iuo *IdentityUpdateOne) sqlSave(ctx context.Context) (_node *Identity, err Columns: []string{identity.IdentityToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -587,9 +550,10 @@ func (iuo *IdentityUpdateOne) sqlSave(ctx context.Context) (_node *Identity, err if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{identity.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + iuo.mutation.done = true return _node, nil } diff --git a/ent/includednetwork.go b/ent/includednetwork.go index e7afe8f1..2135aa9a 100755 --- a/ent/includednetwork.go +++ b/ent/includednetwork.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/includednetwork" "github.com/gen0cide/laforge/ent/network" @@ -26,6 +27,7 @@ type IncludedNetwork struct { // The values are being populated by the IncludedNetworkQuery when eager-loading is set. Edges IncludedNetworkEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // IncludedNetworkToTag holds the value of the IncludedNetworkToTag edge. HCLIncludedNetworkToTag []*Tag `json:"IncludedNetworkToTag,omitempty"` @@ -35,8 +37,9 @@ type IncludedNetwork struct { HCLIncludedNetworkToNetwork *Network `json:"IncludedNetworkToNetwork,omitempty"` // IncludedNetworkToEnvironment holds the value of the IncludedNetworkToEnvironment edge. HCLIncludedNetworkToEnvironment []*Environment `json:"IncludedNetworkToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ included_network_included_network_to_network *uuid.UUID + selectValues sql.SelectValues } // IncludedNetworkEdges holds the relations/edges for other nodes in the graph. @@ -52,6 +55,12 @@ type IncludedNetworkEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [4]bool + // totalCount holds the count of the edges above. + totalCount [4]map[string]int + + namedIncludedNetworkToTag map[string][]*Tag + namedIncludedNetworkToHost map[string][]*Host + namedIncludedNetworkToEnvironment map[string][]*Environment } // IncludedNetworkToTagOrErr returns the IncludedNetworkToTag value or an error if the edge @@ -77,8 +86,7 @@ func (e IncludedNetworkEdges) IncludedNetworkToHostOrErr() ([]*Host, error) { func (e IncludedNetworkEdges) IncludedNetworkToNetworkOrErr() (*Network, error) { if e.loadedTypes[2] { if e.IncludedNetworkToNetwork == nil { - // The edge IncludedNetworkToNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: network.Label} } return e.IncludedNetworkToNetwork, nil @@ -96,8 +104,8 @@ func (e IncludedNetworkEdges) IncludedNetworkToEnvironmentOrErr() ([]*Environmen } // scanValues returns the types for scanning values from sql.Rows. 
-func (*IncludedNetwork) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*IncludedNetwork) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case includednetwork.FieldHosts: @@ -109,7 +117,7 @@ func (*IncludedNetwork) scanValues(columns []string) ([]interface{}, error) { case includednetwork.ForeignKeys[0]: // included_network_included_network_to_network values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type IncludedNetwork", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -117,7 +125,7 @@ func (*IncludedNetwork) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the IncludedNetwork fields. -func (in *IncludedNetwork) assignValues(columns []string, values []interface{}) error { +func (in *IncludedNetwork) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -150,46 +158,54 @@ func (in *IncludedNetwork) assignValues(columns []string, values []interface{}) in.included_network_included_network_to_network = new(uuid.UUID) *in.included_network_included_network_to_network = *value.S.(*uuid.UUID) } + default: + in.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the IncludedNetwork. +// This includes values selected through modifiers, order, etc. +func (in *IncludedNetwork) Value(name string) (ent.Value, error) { + return in.selectValues.Get(name) +} + // QueryIncludedNetworkToTag queries the "IncludedNetworkToTag" edge of the IncludedNetwork entity. func (in *IncludedNetwork) QueryIncludedNetworkToTag() *TagQuery { - return (&IncludedNetworkClient{config: in.config}).QueryIncludedNetworkToTag(in) + return NewIncludedNetworkClient(in.config).QueryIncludedNetworkToTag(in) } // QueryIncludedNetworkToHost queries the "IncludedNetworkToHost" edge of the IncludedNetwork entity. func (in *IncludedNetwork) QueryIncludedNetworkToHost() *HostQuery { - return (&IncludedNetworkClient{config: in.config}).QueryIncludedNetworkToHost(in) + return NewIncludedNetworkClient(in.config).QueryIncludedNetworkToHost(in) } // QueryIncludedNetworkToNetwork queries the "IncludedNetworkToNetwork" edge of the IncludedNetwork entity. func (in *IncludedNetwork) QueryIncludedNetworkToNetwork() *NetworkQuery { - return (&IncludedNetworkClient{config: in.config}).QueryIncludedNetworkToNetwork(in) + return NewIncludedNetworkClient(in.config).QueryIncludedNetworkToNetwork(in) } // QueryIncludedNetworkToEnvironment queries the "IncludedNetworkToEnvironment" edge of the IncludedNetwork entity. func (in *IncludedNetwork) QueryIncludedNetworkToEnvironment() *EnvironmentQuery { - return (&IncludedNetworkClient{config: in.config}).QueryIncludedNetworkToEnvironment(in) + return NewIncludedNetworkClient(in.config).QueryIncludedNetworkToEnvironment(in) } // Update returns a builder for updating this IncludedNetwork. // Note that you need to call IncludedNetwork.Unwrap() before calling this method if this IncludedNetwork // was returned from a transaction, and the transaction was committed or rolled back. 
func (in *IncludedNetwork) Update() *IncludedNetworkUpdateOne { - return (&IncludedNetworkClient{config: in.config}).UpdateOne(in) + return NewIncludedNetworkClient(in.config).UpdateOne(in) } // Unwrap unwraps the IncludedNetwork entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (in *IncludedNetwork) Unwrap() *IncludedNetwork { - tx, ok := in.config.driver.(*txDriver) + _tx, ok := in.config.driver.(*txDriver) if !ok { panic("ent: IncludedNetwork is not a transactional entity") } - in.config.driver = tx.drv + in.config.driver = _tx.drv return in } @@ -197,20 +213,87 @@ func (in *IncludedNetwork) Unwrap() *IncludedNetwork { func (in *IncludedNetwork) String() string { var builder strings.Builder builder.WriteString("IncludedNetwork(") - builder.WriteString(fmt.Sprintf("id=%v", in.ID)) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", in.ID)) + builder.WriteString("name=") builder.WriteString(in.Name) - builder.WriteString(", hosts=") + builder.WriteString(", ") + builder.WriteString("hosts=") builder.WriteString(fmt.Sprintf("%v", in.Hosts)) builder.WriteByte(')') return builder.String() } -// IncludedNetworks is a parsable slice of IncludedNetwork. -type IncludedNetworks []*IncludedNetwork +// NamedIncludedNetworkToTag returns the IncludedNetworkToTag named value or an error if the edge was not +// loaded in eager-loading with this name. +func (in *IncludedNetwork) NamedIncludedNetworkToTag(name string) ([]*Tag, error) { + if in.Edges.namedIncludedNetworkToTag == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := in.Edges.namedIncludedNetworkToTag[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (in *IncludedNetwork) appendNamedIncludedNetworkToTag(name string, edges ...*Tag) { + if in.Edges.namedIncludedNetworkToTag == nil { + in.Edges.namedIncludedNetworkToTag = make(map[string][]*Tag) + } + if len(edges) == 0 { + in.Edges.namedIncludedNetworkToTag[name] = []*Tag{} + } else { + in.Edges.namedIncludedNetworkToTag[name] = append(in.Edges.namedIncludedNetworkToTag[name], edges...) + } +} + +// NamedIncludedNetworkToHost returns the IncludedNetworkToHost named value or an error if the edge was not +// loaded in eager-loading with this name. +func (in *IncludedNetwork) NamedIncludedNetworkToHost(name string) ([]*Host, error) { + if in.Edges.namedIncludedNetworkToHost == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := in.Edges.namedIncludedNetworkToHost[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (in *IncludedNetwork) appendNamedIncludedNetworkToHost(name string, edges ...*Host) { + if in.Edges.namedIncludedNetworkToHost == nil { + in.Edges.namedIncludedNetworkToHost = make(map[string][]*Host) + } + if len(edges) == 0 { + in.Edges.namedIncludedNetworkToHost[name] = []*Host{} + } else { + in.Edges.namedIncludedNetworkToHost[name] = append(in.Edges.namedIncludedNetworkToHost[name], edges...) + } +} + +// NamedIncludedNetworkToEnvironment returns the IncludedNetworkToEnvironment named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (in *IncludedNetwork) NamedIncludedNetworkToEnvironment(name string) ([]*Environment, error) { + if in.Edges.namedIncludedNetworkToEnvironment == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := in.Edges.namedIncludedNetworkToEnvironment[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (in IncludedNetworks) config(cfg config) { - for _i := range in { - in[_i].config = cfg +func (in *IncludedNetwork) appendNamedIncludedNetworkToEnvironment(name string, edges ...*Environment) { + if in.Edges.namedIncludedNetworkToEnvironment == nil { + in.Edges.namedIncludedNetworkToEnvironment = make(map[string][]*Environment) + } + if len(edges) == 0 { + in.Edges.namedIncludedNetworkToEnvironment[name] = []*Environment{} + } else { + in.Edges.namedIncludedNetworkToEnvironment[name] = append(in.Edges.namedIncludedNetworkToEnvironment[name], edges...) } } + +// IncludedNetworks is a parsable slice of IncludedNetwork. +type IncludedNetworks []*IncludedNetwork diff --git a/ent/includednetwork/includednetwork.go b/ent/includednetwork/includednetwork.go index 79e6db73..e46db0fb 100755 --- a/ent/includednetwork/includednetwork.go +++ b/ent/includednetwork/includednetwork.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package includednetwork import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -92,3 +94,93 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the IncludedNetwork queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByIncludedNetworkToTagCount orders the results by IncludedNetworkToTag count. +func ByIncludedNetworkToTagCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newIncludedNetworkToTagStep(), opts...) + } +} + +// ByIncludedNetworkToTag orders the results by IncludedNetworkToTag terms. +func ByIncludedNetworkToTag(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newIncludedNetworkToTagStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByIncludedNetworkToHostCount orders the results by IncludedNetworkToHost count. +func ByIncludedNetworkToHostCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newIncludedNetworkToHostStep(), opts...) + } +} + +// ByIncludedNetworkToHost orders the results by IncludedNetworkToHost terms. +func ByIncludedNetworkToHost(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newIncludedNetworkToHostStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByIncludedNetworkToNetworkField orders the results by IncludedNetworkToNetwork field. 
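For context, a minimal sketch of how the new OrderOption helpers added in this file might be called (client, ctx, and the descending option are illustrative, not taken from this diff; `sql` is entgo.io/ent/dialect/sql):

    // Order IncludedNetworks by name, then by how many tags each one has.
    ins, err := client.IncludedNetwork.Query().
        Order(
            includednetwork.ByName(),
            includednetwork.ByIncludedNetworkToTagCount(sql.OrderDesc()),
        ).
        All(ctx)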
+func ByIncludedNetworkToNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newIncludedNetworkToNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByIncludedNetworkToEnvironmentCount orders the results by IncludedNetworkToEnvironment count. +func ByIncludedNetworkToEnvironmentCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newIncludedNetworkToEnvironmentStep(), opts...) + } +} + +// ByIncludedNetworkToEnvironment orders the results by IncludedNetworkToEnvironment terms. +func ByIncludedNetworkToEnvironment(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newIncludedNetworkToEnvironmentStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newIncludedNetworkToTagStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(IncludedNetworkToTagInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, IncludedNetworkToTagTable, IncludedNetworkToTagColumn), + ) +} +func newIncludedNetworkToHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(IncludedNetworkToHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, IncludedNetworkToHostTable, IncludedNetworkToHostPrimaryKey...), + ) +} +func newIncludedNetworkToNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(IncludedNetworkToNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, IncludedNetworkToNetworkTable, IncludedNetworkToNetworkColumn), + ) +} +func newIncludedNetworkToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(IncludedNetworkToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, IncludedNetworkToEnvironmentTable, IncludedNetworkToEnvironmentPrimaryKey...), + ) +} diff --git a/ent/includednetwork/where.go b/ent/includednetwork/where.go index 2a59c7df..f538bc11 100755 --- a/ent/includednetwork/where.go +++ b/ent/includednetwork/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package includednetwork @@ -11,203 +11,117 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.IncludedNetwork(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.IncludedNetwork(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.IncludedNetwork(sql.FieldLTE(FieldID, id)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldEQ(FieldName, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.IncludedNetwork { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.IncludedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.IncludedNetwork(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. 
func NameNotIn(vs ...string) predicate.IncludedNetwork { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.IncludedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.IncludedNetwork(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.IncludedNetwork(sql.FieldContainsFold(FieldName, v)) } // HasIncludedNetworkToTag applies the HasEdge predicate on the "IncludedNetworkToTag" edge. 
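The predicate helpers in this file keep their external signatures; only the bodies move to the sql.Field* helpers. A sketch of typical use, assuming the generated Laforge client and that the Tag package exposes a NameEQ predicate (both assumptions, not shown in this diff):

    // Find IncludedNetworks whose name contains "team" and that carry a "production" tag.
    ins, err := client.IncludedNetwork.Query().
        Where(
            includednetwork.NameContains("team"),
            includednetwork.HasIncludedNetworkToTagWith(tag.NameEQ("production")),
        ).
        All(ctx)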
@@ -215,7 +129,6 @@ func HasIncludedNetworkToTag() predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToTagTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, IncludedNetworkToTagTable, IncludedNetworkToTagColumn), ) sqlgraph.HasNeighbors(s, step) @@ -225,11 +138,7 @@ func HasIncludedNetworkToTag() predicate.IncludedNetwork { // HasIncludedNetworkToTagWith applies the HasEdge predicate on the "IncludedNetworkToTag" edge with a given conditions (other predicates). func HasIncludedNetworkToTagWith(preds ...predicate.Tag) predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToTagInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, IncludedNetworkToTagTable, IncludedNetworkToTagColumn), - ) + step := newIncludedNetworkToTagStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -243,7 +152,6 @@ func HasIncludedNetworkToHost() predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, IncludedNetworkToHostTable, IncludedNetworkToHostPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -253,11 +161,7 @@ func HasIncludedNetworkToHost() predicate.IncludedNetwork { // HasIncludedNetworkToHostWith applies the HasEdge predicate on the "IncludedNetworkToHost" edge with a given conditions (other predicates). func HasIncludedNetworkToHostWith(preds ...predicate.Host) predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, IncludedNetworkToHostTable, IncludedNetworkToHostPrimaryKey...), - ) + step := newIncludedNetworkToHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -271,7 +175,6 @@ func HasIncludedNetworkToNetwork() predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, IncludedNetworkToNetworkTable, IncludedNetworkToNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -281,11 +184,7 @@ func HasIncludedNetworkToNetwork() predicate.IncludedNetwork { // HasIncludedNetworkToNetworkWith applies the HasEdge predicate on the "IncludedNetworkToNetwork" edge with a given conditions (other predicates). 
func HasIncludedNetworkToNetworkWith(preds ...predicate.Network) predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, IncludedNetworkToNetworkTable, IncludedNetworkToNetworkColumn), - ) + step := newIncludedNetworkToNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -299,7 +198,6 @@ func HasIncludedNetworkToEnvironment() predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, IncludedNetworkToEnvironmentTable, IncludedNetworkToEnvironmentPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -309,11 +207,7 @@ func HasIncludedNetworkToEnvironment() predicate.IncludedNetwork { // HasIncludedNetworkToEnvironmentWith applies the HasEdge predicate on the "IncludedNetworkToEnvironment" edge with a given conditions (other predicates). func HasIncludedNetworkToEnvironmentWith(preds ...predicate.Environment) predicate.IncludedNetwork { return predicate.IncludedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(IncludedNetworkToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, IncludedNetworkToEnvironmentTable, IncludedNetworkToEnvironmentPrimaryKey...), - ) + step := newIncludedNetworkToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -324,32 +218,15 @@ func HasIncludedNetworkToEnvironmentWith(preds ...predicate.Environment) predica // And groups predicates with the AND operator between them. func And(predicates ...predicate.IncludedNetwork) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.IncludedNetwork(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.IncludedNetwork) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.IncludedNetwork(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.IncludedNetwork) predicate.IncludedNetwork { - return predicate.IncludedNetwork(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.IncludedNetwork(sql.NotPredicates(p)) } diff --git a/ent/includednetwork_create.go b/ent/includednetwork_create.go index 42d2ef23..2705e199 100755 --- a/ent/includednetwork_create.go +++ b/ent/includednetwork_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -121,44 +121,8 @@ func (inc *IncludedNetworkCreate) Mutation() *IncludedNetworkMutation { // Save creates the IncludedNetwork in the database. 
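Save below is collapsed onto the shared withHooks helper, so hooks still run but call sites are unchanged. A sketch of creating an IncludedNetwork (the field values and the []string type for Hosts are assumptions, not taken from this diff):

    // Create a new IncludedNetwork; hooks registered on the client still fire.
    in, err := client.IncludedNetwork.
        Create().
        SetName("corp-lan").
        SetHosts([]string{"web01", "db01"}).
        Save(ctx)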
func (inc *IncludedNetworkCreate) Save(ctx context.Context) (*IncludedNetwork, error) { - var ( - err error - node *IncludedNetwork - ) inc.defaults() - if len(inc.hooks) == 0 { - if err = inc.check(); err != nil { - return nil, err - } - node, err = inc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IncludedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = inc.check(); err != nil { - return nil, err - } - inc.mutation = mutation - if node, err = inc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(inc.hooks) - 1; i >= 0; i-- { - if inc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = inc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, inc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, inc.sqlSave, inc.mutation, inc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -203,10 +167,13 @@ func (inc *IncludedNetworkCreate) check() error { } func (inc *IncludedNetworkCreate) sqlSave(ctx context.Context) (*IncludedNetwork, error) { + if err := inc.check(); err != nil { + return nil, err + } _node, _spec := inc.createSpec() if err := sqlgraph.CreateNode(ctx, inc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -217,38 +184,26 @@ func (inc *IncludedNetworkCreate) sqlSave(ctx context.Context) (*IncludedNetwork return nil, err } } + inc.mutation.id = &_node.ID + inc.mutation.done = true return _node, nil } func (inc *IncludedNetworkCreate) createSpec() (*IncludedNetwork, *sqlgraph.CreateSpec) { var ( _node = &IncludedNetwork{config: inc.config} - _spec = &sqlgraph.CreateSpec{ - Table: includednetwork.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(includednetwork.Table, sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID)) ) if id, ok := inc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := inc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: includednetwork.FieldName, - }) + _spec.SetField(includednetwork.FieldName, field.TypeString, value) _node.Name = value } if value, ok := inc.mutation.Hosts(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: includednetwork.FieldHosts, - }) + _spec.SetField(includednetwork.FieldHosts, field.TypeJSON, value) _node.Hosts = value } if nodes := inc.mutation.IncludedNetworkToTagIDs(); len(nodes) > 0 { @@ -259,10 +214,7 @@ func (inc *IncludedNetworkCreate) createSpec() (*IncludedNetwork, *sqlgraph.Crea Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -278,10 +230,7 @@ func (inc *IncludedNetworkCreate) createSpec() (*IncludedNetwork, *sqlgraph.Crea Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -297,10 +246,7 @@ func (inc *IncludedNetworkCreate) createSpec() (*IncludedNetwork, *sqlgraph.Crea Columns: []string{includednetwork.IncludedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -317,10 +263,7 @@ func (inc *IncludedNetworkCreate) createSpec() (*IncludedNetwork, *sqlgraph.Crea Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -334,11 +277,15 @@ func (inc *IncludedNetworkCreate) createSpec() (*IncludedNetwork, *sqlgraph.Crea // IncludedNetworkCreateBulk is the builder for creating many IncludedNetwork entities in bulk. type IncludedNetworkCreateBulk struct { config + err error builders []*IncludedNetworkCreate } // Save creates the IncludedNetwork entities in the database. func (incb *IncludedNetworkCreateBulk) Save(ctx context.Context) ([]*IncludedNetwork, error) { + if incb.err != nil { + return nil, incb.err + } specs := make([]*sqlgraph.CreateSpec, len(incb.builders)) nodes := make([]*IncludedNetwork, len(incb.builders)) mutators := make([]Mutator, len(incb.builders)) @@ -355,8 +302,8 @@ func (incb *IncludedNetworkCreateBulk) Save(ctx context.Context) ([]*IncludedNet return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, incb.builders[i+1].mutation) } else { @@ -364,7 +311,7 @@ func (incb *IncludedNetworkCreateBulk) Save(ctx context.Context) ([]*IncludedNet // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, incb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/includednetwork_delete.go b/ent/includednetwork_delete.go index 12440eac..c0f68935 100755 --- a/ent/includednetwork_delete.go +++ b/ent/includednetwork_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ind *IncludedNetworkDelete) Where(ps ...predicate.IncludedNetwork) *Includ // Exec executes the deletion query and returns how many vertices were deleted. 
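Exec below also moves to withHooks, and IncludedNetworkDeleteOne gains its own Where method; caller-side usage stays the same. A sketch (names are illustrative):

    // Delete all IncludedNetworks whose name starts with "stale-".
    n, err := client.IncludedNetwork.
        Delete().
        Where(includednetwork.NameHasPrefix("stale-")).
        Exec(ctx)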
func (ind *IncludedNetworkDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ind.hooks) == 0 { - affected, err = ind.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IncludedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ind.mutation = mutation - affected, err = ind.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ind.hooks) - 1; i >= 0; i-- { - if ind.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ind.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ind.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ind.sqlExec, ind.mutation, ind.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (ind *IncludedNetworkDelete) ExecX(ctx context.Context) int { } func (ind *IncludedNetworkDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: includednetwork.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(includednetwork.Table, sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID)) if ps := ind.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (ind *IncludedNetworkDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, ind.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, ind.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ind.mutation.done = true + return affected, err } // IncludedNetworkDeleteOne is the builder for deleting a single IncludedNetwork entity. @@ -92,6 +61,12 @@ type IncludedNetworkDeleteOne struct { ind *IncludedNetworkDelete } +// Where appends a list predicates to the IncludedNetworkDelete builder. +func (indo *IncludedNetworkDeleteOne) Where(ps ...predicate.IncludedNetwork) *IncludedNetworkDeleteOne { + indo.ind.mutation.Where(ps...) + return indo +} + // Exec executes the deletion query. func (indo *IncludedNetworkDeleteOne) Exec(ctx context.Context) error { n, err := indo.ind.Exec(ctx) @@ -107,5 +82,7 @@ func (indo *IncludedNetworkDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (indo *IncludedNetworkDeleteOne) ExecX(ctx context.Context) { - indo.ind.ExecX(ctx) + if err := indo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/includednetwork_query.go b/ent/includednetwork_query.go index 55738c42..93cdb9bd 100755 --- a/ent/includednetwork_query.go +++ b/ent/includednetwork_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -24,18 +23,20 @@ import ( // IncludedNetworkQuery is the builder for querying IncludedNetwork entities. type IncludedNetworkQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.IncludedNetwork - // eager-loading edges. 
- withIncludedNetworkToTag *TagQuery - withIncludedNetworkToHost *HostQuery - withIncludedNetworkToNetwork *NetworkQuery - withIncludedNetworkToEnvironment *EnvironmentQuery - withFKs bool + ctx *QueryContext + order []includednetwork.OrderOption + inters []Interceptor + predicates []predicate.IncludedNetwork + withIncludedNetworkToTag *TagQuery + withIncludedNetworkToHost *HostQuery + withIncludedNetworkToNetwork *NetworkQuery + withIncludedNetworkToEnvironment *EnvironmentQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*IncludedNetwork) error + withNamedIncludedNetworkToTag map[string]*TagQuery + withNamedIncludedNetworkToHost map[string]*HostQuery + withNamedIncludedNetworkToEnvironment map[string]*EnvironmentQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -47,34 +48,34 @@ func (inq *IncludedNetworkQuery) Where(ps ...predicate.IncludedNetwork) *Include return inq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (inq *IncludedNetworkQuery) Limit(limit int) *IncludedNetworkQuery { - inq.limit = &limit + inq.ctx.Limit = &limit return inq } -// Offset adds an offset step to the query. +// Offset to start from. func (inq *IncludedNetworkQuery) Offset(offset int) *IncludedNetworkQuery { - inq.offset = &offset + inq.ctx.Offset = &offset return inq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (inq *IncludedNetworkQuery) Unique(unique bool) *IncludedNetworkQuery { - inq.unique = &unique + inq.ctx.Unique = &unique return inq } -// Order adds an order step to the query. -func (inq *IncludedNetworkQuery) Order(o ...OrderFunc) *IncludedNetworkQuery { +// Order specifies how the records should be ordered. +func (inq *IncludedNetworkQuery) Order(o ...includednetwork.OrderOption) *IncludedNetworkQuery { inq.order = append(inq.order, o...) return inq } // QueryIncludedNetworkToTag chains the current query on the "IncludedNetworkToTag" edge. func (inq *IncludedNetworkQuery) QueryIncludedNetworkToTag() *TagQuery { - query := &TagQuery{config: inq.config} + query := (&TagClient{config: inq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := inq.prepareQuery(ctx); err != nil { return nil, err @@ -96,7 +97,7 @@ func (inq *IncludedNetworkQuery) QueryIncludedNetworkToTag() *TagQuery { // QueryIncludedNetworkToHost chains the current query on the "IncludedNetworkToHost" edge. func (inq *IncludedNetworkQuery) QueryIncludedNetworkToHost() *HostQuery { - query := &HostQuery{config: inq.config} + query := (&HostClient{config: inq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := inq.prepareQuery(ctx); err != nil { return nil, err @@ -118,7 +119,7 @@ func (inq *IncludedNetworkQuery) QueryIncludedNetworkToHost() *HostQuery { // QueryIncludedNetworkToNetwork chains the current query on the "IncludedNetworkToNetwork" edge. 
func (inq *IncludedNetworkQuery) QueryIncludedNetworkToNetwork() *NetworkQuery { - query := &NetworkQuery{config: inq.config} + query := (&NetworkClient{config: inq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := inq.prepareQuery(ctx); err != nil { return nil, err @@ -140,7 +141,7 @@ func (inq *IncludedNetworkQuery) QueryIncludedNetworkToNetwork() *NetworkQuery { // QueryIncludedNetworkToEnvironment chains the current query on the "IncludedNetworkToEnvironment" edge. func (inq *IncludedNetworkQuery) QueryIncludedNetworkToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: inq.config} + query := (&EnvironmentClient{config: inq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := inq.prepareQuery(ctx); err != nil { return nil, err @@ -163,7 +164,7 @@ func (inq *IncludedNetworkQuery) QueryIncludedNetworkToEnvironment() *Environmen // First returns the first IncludedNetwork entity from the query. // Returns a *NotFoundError when no IncludedNetwork was found. func (inq *IncludedNetworkQuery) First(ctx context.Context) (*IncludedNetwork, error) { - nodes, err := inq.Limit(1).All(ctx) + nodes, err := inq.Limit(1).All(setContextOp(ctx, inq.ctx, "First")) if err != nil { return nil, err } @@ -186,7 +187,7 @@ func (inq *IncludedNetworkQuery) FirstX(ctx context.Context) *IncludedNetwork { // Returns a *NotFoundError when no IncludedNetwork ID was found. func (inq *IncludedNetworkQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = inq.Limit(1).IDs(ctx); err != nil { + if ids, err = inq.Limit(1).IDs(setContextOp(ctx, inq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -209,7 +210,7 @@ func (inq *IncludedNetworkQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one IncludedNetwork entity is found. // Returns a *NotFoundError when no IncludedNetwork entities are found. func (inq *IncludedNetworkQuery) Only(ctx context.Context) (*IncludedNetwork, error) { - nodes, err := inq.Limit(2).All(ctx) + nodes, err := inq.Limit(2).All(setContextOp(ctx, inq.ctx, "Only")) if err != nil { return nil, err } @@ -237,7 +238,7 @@ func (inq *IncludedNetworkQuery) OnlyX(ctx context.Context) *IncludedNetwork { // Returns a *NotFoundError when no entities are found. func (inq *IncludedNetworkQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = inq.Limit(2).IDs(ctx); err != nil { + if ids, err = inq.Limit(2).IDs(setContextOp(ctx, inq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -262,10 +263,12 @@ func (inq *IncludedNetworkQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of IncludedNetworks. func (inq *IncludedNetworkQuery) All(ctx context.Context) ([]*IncludedNetwork, error) { + ctx = setContextOp(ctx, inq.ctx, "All") if err := inq.prepareQuery(ctx); err != nil { return nil, err } - return inq.sqlAll(ctx) + qr := querierAll[[]*IncludedNetwork, *IncludedNetworkQuery]() + return withInterceptors[[]*IncludedNetwork](ctx, inq, qr, inq.inters) } // AllX is like All, but panics if an error occurs. @@ -278,9 +281,12 @@ func (inq *IncludedNetworkQuery) AllX(ctx context.Context) []*IncludedNetwork { } // IDs executes the query and returns a list of IncludedNetwork IDs. 
-func (inq *IncludedNetworkQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := inq.Select(includednetwork.FieldID).Scan(ctx, &ids); err != nil { +func (inq *IncludedNetworkQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if inq.ctx.Unique == nil && inq.path != nil { + inq.Unique(true) + } + ctx = setContextOp(ctx, inq.ctx, "IDs") + if err = inq.Select(includednetwork.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -297,10 +303,11 @@ func (inq *IncludedNetworkQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (inq *IncludedNetworkQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, inq.ctx, "Count") if err := inq.prepareQuery(ctx); err != nil { return 0, err } - return inq.sqlCount(ctx) + return withInterceptors[int](ctx, inq, querierCount[*IncludedNetworkQuery](), inq.inters) } // CountX is like Count, but panics if an error occurs. @@ -314,10 +321,15 @@ func (inq *IncludedNetworkQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (inq *IncludedNetworkQuery) Exist(ctx context.Context) (bool, error) { - if err := inq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, inq.ctx, "Exist") + switch _, err := inq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return inq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -337,25 +349,24 @@ func (inq *IncludedNetworkQuery) Clone() *IncludedNetworkQuery { } return &IncludedNetworkQuery{ config: inq.config, - limit: inq.limit, - offset: inq.offset, - order: append([]OrderFunc{}, inq.order...), + ctx: inq.ctx.Clone(), + order: append([]includednetwork.OrderOption{}, inq.order...), + inters: append([]Interceptor{}, inq.inters...), predicates: append([]predicate.IncludedNetwork{}, inq.predicates...), withIncludedNetworkToTag: inq.withIncludedNetworkToTag.Clone(), withIncludedNetworkToHost: inq.withIncludedNetworkToHost.Clone(), withIncludedNetworkToNetwork: inq.withIncludedNetworkToNetwork.Clone(), withIncludedNetworkToEnvironment: inq.withIncludedNetworkToEnvironment.Clone(), // clone intermediate query. - sql: inq.sql.Clone(), - path: inq.path, - unique: inq.unique, + sql: inq.sql.Clone(), + path: inq.path, } } // WithIncludedNetworkToTag tells the query-builder to eager-load the nodes that are connected to // the "IncludedNetworkToTag" edge. The optional arguments are used to configure the query builder of the edge. func (inq *IncludedNetworkQuery) WithIncludedNetworkToTag(opts ...func(*TagQuery)) *IncludedNetworkQuery { - query := &TagQuery{config: inq.config} + query := (&TagClient{config: inq.config}).Query() for _, opt := range opts { opt(query) } @@ -366,7 +377,7 @@ func (inq *IncludedNetworkQuery) WithIncludedNetworkToTag(opts ...func(*TagQuery // WithIncludedNetworkToHost tells the query-builder to eager-load the nodes that are connected to // the "IncludedNetworkToHost" edge. The optional arguments are used to configure the query builder of the edge. 
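The With* eager-loading builders now construct their sub-queries through the typed clients, but they are used exactly as before. A sketch (client and ctx assumed, not part of this diff):

    // Eager-load tags and hosts alongside each IncludedNetwork.
    ins, err := client.IncludedNetwork.Query().
        WithIncludedNetworkToTag().
        WithIncludedNetworkToHost().
        All(ctx)
    // Loaded edges are then available on each node, e.g. ins[0].Edges.IncludedNetworkToTag.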
func (inq *IncludedNetworkQuery) WithIncludedNetworkToHost(opts ...func(*HostQuery)) *IncludedNetworkQuery { - query := &HostQuery{config: inq.config} + query := (&HostClient{config: inq.config}).Query() for _, opt := range opts { opt(query) } @@ -377,7 +388,7 @@ func (inq *IncludedNetworkQuery) WithIncludedNetworkToHost(opts ...func(*HostQue // WithIncludedNetworkToNetwork tells the query-builder to eager-load the nodes that are connected to // the "IncludedNetworkToNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (inq *IncludedNetworkQuery) WithIncludedNetworkToNetwork(opts ...func(*NetworkQuery)) *IncludedNetworkQuery { - query := &NetworkQuery{config: inq.config} + query := (&NetworkClient{config: inq.config}).Query() for _, opt := range opts { opt(query) } @@ -388,7 +399,7 @@ func (inq *IncludedNetworkQuery) WithIncludedNetworkToNetwork(opts ...func(*Netw // WithIncludedNetworkToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "IncludedNetworkToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (inq *IncludedNetworkQuery) WithIncludedNetworkToEnvironment(opts ...func(*EnvironmentQuery)) *IncludedNetworkQuery { - query := &EnvironmentQuery{config: inq.config} + query := (&EnvironmentClient{config: inq.config}).Query() for _, opt := range opts { opt(query) } @@ -410,17 +421,13 @@ func (inq *IncludedNetworkQuery) WithIncludedNetworkToEnvironment(opts ...func(* // GroupBy(includednetwork.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (inq *IncludedNetworkQuery) GroupBy(field string, fields ...string) *IncludedNetworkGroupBy { - group := &IncludedNetworkGroupBy{config: inq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := inq.prepareQuery(ctx); err != nil { - return nil, err - } - return inq.sqlQuery(ctx), nil - } - return group + inq.ctx.Fields = append([]string{field}, fields...) + grbuild := &IncludedNetworkGroupBy{build: inq} + grbuild.flds = &inq.ctx.Fields + grbuild.label = includednetwork.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -435,14 +442,31 @@ func (inq *IncludedNetworkQuery) GroupBy(field string, fields ...string) *Includ // client.IncludedNetwork.Query(). // Select(includednetwork.FieldName). // Scan(ctx, &v) -// func (inq *IncludedNetworkQuery) Select(fields ...string) *IncludedNetworkSelect { - inq.fields = append(inq.fields, fields...) - return &IncludedNetworkSelect{IncludedNetworkQuery: inq} + inq.ctx.Fields = append(inq.ctx.Fields, fields...) + sbuild := &IncludedNetworkSelect{IncludedNetworkQuery: inq} + sbuild.label = includednetwork.Label + sbuild.flds, sbuild.scan = &inq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a IncludedNetworkSelect configured with the given aggregations. +func (inq *IncludedNetworkQuery) Aggregate(fns ...AggregateFunc) *IncludedNetworkSelect { + return inq.Select().Aggregate(fns...) 
} func (inq *IncludedNetworkQuery) prepareQuery(ctx context.Context) error { - for _, f := range inq.fields { + for _, inter := range inq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, inq); err != nil { + return err + } + } + } + for _, f := range inq.ctx.Fields { if !includednetwork.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -457,7 +481,7 @@ func (inq *IncludedNetworkQuery) prepareQuery(ctx context.Context) error { return nil } -func (inq *IncludedNetworkQuery) sqlAll(ctx context.Context) ([]*IncludedNetwork, error) { +func (inq *IncludedNetworkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IncludedNetwork, error) { var ( nodes = []*IncludedNetwork{} withFKs = inq.withFKs @@ -475,251 +499,296 @@ func (inq *IncludedNetworkQuery) sqlAll(ctx context.Context) ([]*IncludedNetwork if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, includednetwork.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*IncludedNetwork).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &IncludedNetwork{config: inq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(inq.modifiers) > 0 { + _spec.Modifiers = inq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, inq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := inq.withIncludedNetworkToTag; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*IncludedNetwork) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.IncludedNetworkToTag = []*Tag{} - } - query.withFKs = true - query.Where(predicate.Tag(func(s *sql.Selector) { - s.Where(sql.InValues(includednetwork.IncludedNetworkToTagColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := inq.loadIncludedNetworkToTag(ctx, query, nodes, + func(n *IncludedNetwork) { n.Edges.IncludedNetworkToTag = []*Tag{} }, + func(n *IncludedNetwork, e *Tag) { + n.Edges.IncludedNetworkToTag = append(n.Edges.IncludedNetworkToTag, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.included_network_included_network_to_tag - if fk == nil { - return nil, fmt.Errorf(`foreign-key "included_network_included_network_to_tag" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "included_network_included_network_to_tag" returned %v for node %v`, *fk, n.ID) - } - node.Edges.IncludedNetworkToTag = append(node.Edges.IncludedNetworkToTag, n) - } } - if query := inq.withIncludedNetworkToHost; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*IncludedNetwork, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.IncludedNetworkToHost = []*Host{} - } - var ( - 
edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*IncludedNetwork) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: includednetwork.IncludedNetworkToHostTable, - Columns: includednetwork.IncludedNetworkToHostPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(includednetwork.IncludedNetworkToHostPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + if err := inq.loadIncludedNetworkToHost(ctx, query, nodes, + func(n *IncludedNetwork) { n.Edges.IncludedNetworkToHost = []*Host{} }, + func(n *IncludedNetwork, e *Host) { + n.Edges.IncludedNetworkToHost = append(n.Edges.IncludedNetworkToHost, e) + }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, inq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "IncludedNetworkToHost": %w`, err) + } + if query := inq.withIncludedNetworkToNetwork; query != nil { + if err := inq.loadIncludedNetworkToNetwork(ctx, query, nodes, nil, + func(n *IncludedNetwork, e *Network) { n.Edges.IncludedNetworkToNetwork = e }); err != nil { + return nil, err } - query.Where(host.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := inq.withIncludedNetworkToEnvironment; query != nil { + if err := inq.loadIncludedNetworkToEnvironment(ctx, query, nodes, + func(n *IncludedNetwork) { n.Edges.IncludedNetworkToEnvironment = []*Environment{} }, + func(n *IncludedNetwork, e *Environment) { + n.Edges.IncludedNetworkToEnvironment = append(n.Edges.IncludedNetworkToEnvironment, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "IncludedNetworkToHost" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.IncludedNetworkToHost = append(nodes[i].Edges.IncludedNetworkToHost, n) - } + } + for name, query := range inq.withNamedIncludedNetworkToTag { + if err := inq.loadIncludedNetworkToTag(ctx, query, nodes, + func(n *IncludedNetwork) { n.appendNamedIncludedNetworkToTag(name) }, + func(n *IncludedNetwork, e *Tag) { n.appendNamedIncludedNetworkToTag(name, e) }); err != nil { + return nil, err } } - - if query := inq.withIncludedNetworkToNetwork; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*IncludedNetwork) - for i := range nodes { - if nodes[i].included_network_included_network_to_network == nil { - continue - } - fk := *nodes[i].included_network_included_network_to_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + for name, query := range inq.withNamedIncludedNetworkToHost { + if err := inq.loadIncludedNetworkToHost(ctx, query, nodes, + func(n *IncludedNetwork) { n.appendNamedIncludedNetworkToHost(name) }, + func(n *IncludedNetwork, e *Host) { n.appendNamedIncludedNetworkToHost(name, e) }); err != 
nil { + return nil, err } - query.Where(network.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range inq.withNamedIncludedNetworkToEnvironment { + if err := inq.loadIncludedNetworkToEnvironment(ctx, query, nodes, + func(n *IncludedNetwork) { n.appendNamedIncludedNetworkToEnvironment(name) }, + func(n *IncludedNetwork, e *Environment) { n.appendNamedIncludedNetworkToEnvironment(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "included_network_included_network_to_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.IncludedNetworkToNetwork = n - } + } + for i := range inq.loadTotal { + if err := inq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := inq.withIncludedNetworkToEnvironment; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*IncludedNetwork, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.IncludedNetworkToEnvironment = []*Environment{} +func (inq *IncludedNetworkQuery) loadIncludedNetworkToTag(ctx context.Context, query *TagQuery, nodes []*IncludedNetwork, init func(*IncludedNetwork), assign func(*IncludedNetwork, *Tag)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*IncludedNetwork) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*IncludedNetwork) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: includednetwork.IncludedNetworkToEnvironmentTable, - Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(includednetwork.IncludedNetworkToEnvironmentPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) + } + query.withFKs = true + query.Where(predicate.Tag(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(includednetwork.IncludedNetworkToTagColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.included_network_included_network_to_tag + if fk == nil { + return fmt.Errorf(`foreign-key "included_network_included_network_to_tag" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "included_network_included_network_to_tag" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (inq *IncludedNetworkQuery) loadIncludedNetworkToHost(ctx context.Context, query *HostQuery, nodes []*IncludedNetwork, init func(*IncludedNetwork), assign func(*IncludedNetwork, *Host)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*IncludedNetwork) + nids := 
make(map[uuid.UUID]map[*IncludedNetwork]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(includednetwork.IncludedNetworkToHostTable) + s.Join(joinT).On(s.C(host.FieldID), joinT.C(includednetwork.IncludedNetworkToHostPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(includednetwork.IncludedNetworkToHostPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(includednetwork.IncludedNetworkToHostPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*IncludedNetwork]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) } - edges[inValue] = append(edges[inValue], node) + nids[inValue][byID[outValue]] = struct{}{} return nil - }, + } + }) + }) + neighbors, err := withInterceptors[[]*Host](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "IncludedNetworkToHost" node returned %v`, n.ID) } - if err := sqlgraph.QueryEdges(ctx, inq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "IncludedNetworkToEnvironment": %w`, err) + for kn := range nodes { + assign(kn, n) } - query.Where(environment.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (inq *IncludedNetworkQuery) loadIncludedNetworkToNetwork(ctx context.Context, query *NetworkQuery, nodes []*IncludedNetwork, init func(*IncludedNetwork), assign func(*IncludedNetwork, *Network)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*IncludedNetwork) + for i := range nodes { + if nodes[i].included_network_included_network_to_network == nil { + continue + } + fk := *nodes[i].included_network_included_network_to_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "IncludedNetworkToEnvironment" node returned %v`, n.ID) + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(network.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "included_network_included_network_to_network" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (inq *IncludedNetworkQuery) loadIncludedNetworkToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*IncludedNetwork, init func(*IncludedNetwork), assign func(*IncludedNetwork, *Environment)) error { + edgeIDs := 
make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*IncludedNetwork) + nids := make(map[uuid.UUID]map[*IncludedNetwork]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(includednetwork.IncludedNetworkToEnvironmentTable) + s.Join(joinT).On(s.C(environment.FieldID), joinT.C(includednetwork.IncludedNetworkToEnvironmentPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(includednetwork.IncludedNetworkToEnvironmentPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(includednetwork.IncludedNetworkToEnvironmentPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - for i := range nodes { - nodes[i].Edges.IncludedNetworkToEnvironment = append(nodes[i].Edges.IncludedNetworkToEnvironment, n) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*IncludedNetwork]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } + }) + }) + neighbors, err := withInterceptors[[]*Environment](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "IncludedNetworkToEnvironment" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - return nodes, nil + return nil } func (inq *IncludedNetworkQuery) sqlCount(ctx context.Context) (int, error) { _spec := inq.querySpec() - _spec.Node.Columns = inq.fields - if len(inq.fields) > 0 { - _spec.Unique = inq.unique != nil && *inq.unique + if len(inq.modifiers) > 0 { + _spec.Modifiers = inq.modifiers } - return sqlgraph.CountNodes(ctx, inq.driver, _spec) -} - -func (inq *IncludedNetworkQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := inq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = inq.ctx.Fields + if len(inq.ctx.Fields) > 0 { + _spec.Unique = inq.ctx.Unique != nil && *inq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, inq.driver, _spec) } func (inq *IncludedNetworkQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: includednetwork.Table, - Columns: includednetwork.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, - }, - From: inq.sql, - Unique: true, - } - if unique := inq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(includednetwork.Table, includednetwork.Columns, sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID)) + _spec.From = inq.sql + if unique := inq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if inq.path != nil { + _spec.Unique = true } - if fields := inq.fields; len(fields) > 0 { + if fields := inq.ctx.Fields; len(fields) > 0 { 
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, includednetwork.FieldID) for i := range fields { @@ -735,10 +804,10 @@ func (inq *IncludedNetworkQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := inq.limit; limit != nil { + if limit := inq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := inq.offset; offset != nil { + if offset := inq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := inq.order; len(ps) > 0 { @@ -754,7 +823,7 @@ func (inq *IncludedNetworkQuery) querySpec() *sqlgraph.QuerySpec { func (inq *IncludedNetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(inq.driver.Dialect()) t1 := builder.Table(includednetwork.Table) - columns := inq.fields + columns := inq.ctx.Fields if len(columns) == 0 { columns = includednetwork.Columns } @@ -763,7 +832,7 @@ func (inq *IncludedNetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = inq.sql selector.Select(selector.Columns(columns...)...) } - if inq.unique != nil && *inq.unique { + if inq.ctx.Unique != nil && *inq.ctx.Unique { selector.Distinct() } for _, p := range inq.predicates { @@ -772,498 +841,142 @@ func (inq *IncludedNetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range inq.order { p(selector) } - if offset := inq.offset; offset != nil { + if offset := inq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := inq.limit; limit != nil { + if limit := inq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// IncludedNetworkGroupBy is the group-by builder for IncludedNetwork entities. -type IncludedNetworkGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (ingb *IncludedNetworkGroupBy) Aggregate(fns ...AggregateFunc) *IncludedNetworkGroupBy { - ingb.fns = append(ingb.fns, fns...) - return ingb -} - -// Scan applies the group-by query and scans the result into the given value. -func (ingb *IncludedNetworkGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := ingb.path(ctx) - if err != nil { - return err - } - ingb.sql = query - return ingb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := ingb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (ingb *IncludedNetworkGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(ingb.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := ingb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) StringsX(ctx context.Context) []string { - v, err := ingb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (ingb *IncludedNetworkGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ingb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) StringX(ctx context.Context) string { - v, err := ingb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (ingb *IncludedNetworkGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(ingb.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := ingb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) IntsX(ctx context.Context) []int { - v, err := ingb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ingb *IncludedNetworkGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ingb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkGroupBy.Ints returned %d results when one was expected", len(v)) +// WithNamedIncludedNetworkToTag tells the query-builder to eager-load the nodes that are connected to the "IncludedNetworkToTag" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (inq *IncludedNetworkQuery) WithNamedIncludedNetworkToTag(name string, opts ...func(*TagQuery)) *IncludedNetworkQuery { + query := (&TagClient{config: inq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) IntX(ctx context.Context) int { - v, err := ingb.Int(ctx) - if err != nil { - panic(err) + if inq.withNamedIncludedNetworkToTag == nil { + inq.withNamedIncludedNetworkToTag = make(map[string]*TagQuery) } - return v + inq.withNamedIncludedNetworkToTag[name] = query + return inq } -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (ingb *IncludedNetworkGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(ingb.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := ingb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedIncludedNetworkToHost tells the query-builder to eager-load the nodes that are connected to the "IncludedNetworkToHost" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (inq *IncludedNetworkQuery) WithNamedIncludedNetworkToHost(name string, opts ...func(*HostQuery)) *IncludedNetworkQuery { + query := (&HostClient{config: inq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := ingb.Float64s(ctx) - if err != nil { - panic(err) + if inq.withNamedIncludedNetworkToHost == nil { + inq.withNamedIncludedNetworkToHost = make(map[string]*HostQuery) } - return v + inq.withNamedIncludedNetworkToHost[name] = query + return inq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ingb *IncludedNetworkGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ingb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedIncludedNetworkToEnvironment tells the query-builder to eager-load the nodes that are connected to the "IncludedNetworkToEnvironment" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (inq *IncludedNetworkQuery) WithNamedIncludedNetworkToEnvironment(name string, opts ...func(*EnvironmentQuery)) *IncludedNetworkQuery { + query := (&EnvironmentClient{config: inq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) Float64X(ctx context.Context) float64 { - v, err := ingb.Float64(ctx) - if err != nil { - panic(err) + if inq.withNamedIncludedNetworkToEnvironment == nil { + inq.withNamedIncludedNetworkToEnvironment = make(map[string]*EnvironmentQuery) } - return v + inq.withNamedIncludedNetworkToEnvironment[name] = query + return inq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (ingb *IncludedNetworkGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(ingb.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := ingb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// IncludedNetworkGroupBy is the group-by builder for IncludedNetwork entities. +type IncludedNetworkGroupBy struct { + selector + build *IncludedNetworkQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) BoolsX(ctx context.Context) []bool { - v, err := ingb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (ingb *IncludedNetworkGroupBy) Aggregate(fns ...AggregateFunc) *IncludedNetworkGroupBy { + ingb.fns = append(ingb.fns, fns...) + return ingb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. 
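Editorial note (not part of the generated diff): the WithNamed* builders added in the hunks above are consumed like the plain With* eager-loaders, but under a caller-chosen edge name. A minimal sketch, assuming a connected *ent.Client; the edge names and the Limit cap are hypothetical.

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// loadWithNamedEdges eager-loads hosts and environments for every
// IncludedNetwork under caller-chosen edge names. The option func lets the
// nested HostQuery be narrowed before it runs.
func loadWithNamedEdges(ctx context.Context, client *ent.Client) ([]*ent.IncludedNetwork, error) {
	return client.IncludedNetwork.Query().
		WithNamedIncludedNetworkToHost("all_hosts", func(q *ent.HostQuery) {
			q.Limit(50) // hypothetical cap on the edge query
		}).
		WithNamedIncludedNetworkToEnvironment("envs").
		All(ctx)
}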
-func (ingb *IncludedNetworkGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ingb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (ingb *IncludedNetworkGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ingb.build.ctx, "GroupBy") + if err := ingb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*IncludedNetworkQuery, *IncludedNetworkGroupBy](ctx, ingb.build, ingb, ingb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (ingb *IncludedNetworkGroupBy) BoolX(ctx context.Context) bool { - v, err := ingb.Bool(ctx) - if err != nil { - panic(err) +func (ingb *IncludedNetworkGroupBy) sqlScan(ctx context.Context, root *IncludedNetworkQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ingb.fns)) + for _, fn := range ingb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (ingb *IncludedNetworkGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range ingb.fields { - if !includednetwork.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ingb.flds)+len(ingb.fns)) + for _, f := range *ingb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := ingb.sqlQuery() + selector.GroupBy(selector.Columns(*ingb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ingb.driver.Query(ctx, query, args, rows); err != nil { + if err := ingb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (ingb *IncludedNetworkGroupBy) sqlQuery() *sql.Selector { - selector := ingb.sql.Select() - aggregation := make([]string, 0, len(ingb.fns)) - for _, fn := range ingb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(ingb.fields)+len(ingb.fns)) - for _, f := range ingb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(ingb.fields...)...) -} - // IncludedNetworkSelect is the builder for selecting fields of IncludedNetwork entities. type IncludedNetworkSelect struct { *IncludedNetworkQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ins *IncludedNetworkSelect) Aggregate(fns ...AggregateFunc) *IncludedNetworkSelect { + ins.fns = append(ins.fns, fns...) + return ins } // Scan applies the selector query and scans the result into the given value. 
-func (ins *IncludedNetworkSelect) Scan(ctx context.Context, v interface{}) error { +func (ins *IncludedNetworkSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ins.ctx, "Select") if err := ins.prepareQuery(ctx); err != nil { return err } - ins.sql = ins.IncludedNetworkQuery.sqlQuery(ctx) - return ins.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ins *IncludedNetworkSelect) ScanX(ctx context.Context, v interface{}) { - if err := ins.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) Strings(ctx context.Context) ([]string, error) { - if len(ins.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ins.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ins *IncludedNetworkSelect) StringsX(ctx context.Context) []string { - v, err := ins.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*IncludedNetworkQuery, *IncludedNetworkSelect](ctx, ins.IncludedNetworkQuery, ins, ins.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ins.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ins *IncludedNetworkSelect) StringX(ctx context.Context) string { - v, err := ins.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) Ints(ctx context.Context) ([]int, error) { - if len(ins.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ins.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ins *IncludedNetworkSelect) IntsX(ctx context.Context) []int { - v, err := ins.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ins.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ins *IncludedNetworkSelect) IntX(ctx context.Context) int { - v, err := ins.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. 
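Editorial note (not part of the generated diff): the typed Strings/Ints/Float64s helpers being removed in these hunks are replaced by the embedded selector plumbing plus the new Aggregate method. A sketch of the aggregate-and-scan pattern, assuming the standard generated ent.Count / ent.As aggregate helpers and a connected client.

package examples

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/includednetwork"
)

// countByName groups IncludedNetworks by name and counts rows per group,
// scanning into an ad-hoc struct whose tags map the result columns.
func countByName(ctx context.Context, client *ent.Client) error {
	var rows []struct {
		Name  string `json:"name"`
		Count int    `json:"count"`
	}
	if err := client.IncludedNetwork.Query().
		GroupBy(includednetwork.FieldName).
		Aggregate(ent.As(ent.Count(), "count")).
		Scan(ctx, &rows); err != nil {
		return err
	}
	for _, r := range rows {
		fmt.Printf("%s: %d\n", r.Name, r.Count)
	}
	return nil
}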
-func (ins *IncludedNetworkSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ins.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ins.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ins *IncludedNetworkSelect) Float64sX(ctx context.Context) []float64 { - v, err := ins.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ins.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ins *IncludedNetworkSelect) Float64X(ctx context.Context) float64 { - v, err := ins.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ins.fields) > 1 { - return nil, errors.New("ent: IncludedNetworkSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ins.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ins *IncludedNetworkSelect) BoolsX(ctx context.Context) []bool { - v, err := ins.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ins *IncludedNetworkSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ins.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{includednetwork.Label} - default: - err = fmt.Errorf("ent: IncludedNetworkSelect.Bools returned %d results when one was expected", len(v)) +func (ins *IncludedNetworkSelect) sqlScan(ctx context.Context, root *IncludedNetworkQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ins.fns)) + for _, fn := range ins.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ins *IncludedNetworkSelect) BoolX(ctx context.Context) bool { - v, err := ins.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ins.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ins *IncludedNetworkSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ins.sql.Query() + query, args := selector.Query() if err := ins.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/includednetwork_update.go b/ent/includednetwork_update.go index fc857dba..55db37c7 100755 --- a/ent/includednetwork_update.go +++ b/ent/includednetwork_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. 
+// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/host" @@ -38,12 +39,26 @@ func (inu *IncludedNetworkUpdate) SetName(s string) *IncludedNetworkUpdate { return inu } +// SetNillableName sets the "name" field if the given value is not nil. +func (inu *IncludedNetworkUpdate) SetNillableName(s *string) *IncludedNetworkUpdate { + if s != nil { + inu.SetName(*s) + } + return inu +} + // SetHosts sets the "hosts" field. func (inu *IncludedNetworkUpdate) SetHosts(s []string) *IncludedNetworkUpdate { inu.mutation.SetHosts(s) return inu } +// AppendHosts appends s to the "hosts" field. +func (inu *IncludedNetworkUpdate) AppendHosts(s []string) *IncludedNetworkUpdate { + inu.mutation.AppendHosts(s) + return inu +} + // AddIncludedNetworkToTagIDs adds the "IncludedNetworkToTag" edge to the Tag entity by IDs. func (inu *IncludedNetworkUpdate) AddIncludedNetworkToTagIDs(ids ...uuid.UUID) *IncludedNetworkUpdate { inu.mutation.AddIncludedNetworkToTagIDs(ids...) @@ -184,34 +199,7 @@ func (inu *IncludedNetworkUpdate) RemoveIncludedNetworkToEnvironment(e ...*Envir // Save executes the query and returns the number of nodes affected by the update operation. func (inu *IncludedNetworkUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(inu.hooks) == 0 { - affected, err = inu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IncludedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - inu.mutation = mutation - affected, err = inu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(inu.hooks) - 1; i >= 0; i-- { - if inu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = inu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, inu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, inu.sqlSave, inu.mutation, inu.hooks) } // SaveX is like Save, but panics if an error occurs. 
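Editorial note (not part of the generated diff): the SetNillableName and AppendHosts builders added above change how partial updates are written against the JSON "hosts" column. A minimal sketch, assuming the usual generated UpdateOneID entry point and a known row id.

package examples

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// appendHost adds one entry to the JSON "hosts" column (sqlSave turns this
// into a sqljson.Append modifier) and only touches "name" when a value is
// actually supplied.
func appendHost(ctx context.Context, client *ent.Client, id uuid.UUID, newHost string, name *string) error {
	return client.IncludedNetwork.UpdateOneID(id).
		AppendHosts([]string{newHost}).
		SetNillableName(name). // no-op when name is nil
		Exec(ctx)
}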
@@ -237,16 +225,7 @@ func (inu *IncludedNetworkUpdate) ExecX(ctx context.Context) { } func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: includednetwork.Table, - Columns: includednetwork.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(includednetwork.Table, includednetwork.Columns, sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID)) if ps := inu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -255,17 +234,14 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error } } if value, ok := inu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: includednetwork.FieldName, - }) + _spec.SetField(includednetwork.FieldName, field.TypeString, value) } if value, ok := inu.mutation.Hosts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: includednetwork.FieldHosts, + _spec.SetField(includednetwork.FieldHosts, field.TypeJSON, value) + } + if value, ok := inu.mutation.AppendedHosts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, includednetwork.FieldHosts, value) }) } if inu.mutation.IncludedNetworkToTagCleared() { @@ -276,10 +252,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -292,10 +265,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -311,10 +281,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -330,10 +297,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -346,10 +310,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -365,10 +326,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) 
(n int, err error Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -384,10 +342,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{includednetwork.IncludedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -400,10 +355,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{includednetwork.IncludedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -419,10 +371,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -435,10 +384,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -454,10 +400,7 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -469,10 +412,11 @@ func (inu *IncludedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{includednetwork.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + inu.mutation.done = true return n, nil } @@ -490,12 +434,26 @@ func (inuo *IncludedNetworkUpdateOne) SetName(s string) *IncludedNetworkUpdateOn return inuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (inuo *IncludedNetworkUpdateOne) SetNillableName(s *string) *IncludedNetworkUpdateOne { + if s != nil { + inuo.SetName(*s) + } + return inuo +} + // SetHosts sets the "hosts" field. func (inuo *IncludedNetworkUpdateOne) SetHosts(s []string) *IncludedNetworkUpdateOne { inuo.mutation.SetHosts(s) return inuo } +// AppendHosts appends s to the "hosts" field. +func (inuo *IncludedNetworkUpdateOne) AppendHosts(s []string) *IncludedNetworkUpdateOne { + inuo.mutation.AppendHosts(s) + return inuo +} + // AddIncludedNetworkToTagIDs adds the "IncludedNetworkToTag" edge to the Tag entity by IDs. 
func (inuo *IncludedNetworkUpdateOne) AddIncludedNetworkToTagIDs(ids ...uuid.UUID) *IncludedNetworkUpdateOne { inuo.mutation.AddIncludedNetworkToTagIDs(ids...) @@ -634,6 +592,12 @@ func (inuo *IncludedNetworkUpdateOne) RemoveIncludedNetworkToEnvironment(e ...*E return inuo.RemoveIncludedNetworkToEnvironmentIDs(ids...) } +// Where appends a list predicates to the IncludedNetworkUpdate builder. +func (inuo *IncludedNetworkUpdateOne) Where(ps ...predicate.IncludedNetwork) *IncludedNetworkUpdateOne { + inuo.mutation.Where(ps...) + return inuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (inuo *IncludedNetworkUpdateOne) Select(field string, fields ...string) *IncludedNetworkUpdateOne { @@ -643,34 +607,7 @@ func (inuo *IncludedNetworkUpdateOne) Select(field string, fields ...string) *In // Save executes the query and returns the updated IncludedNetwork entity. func (inuo *IncludedNetworkUpdateOne) Save(ctx context.Context) (*IncludedNetwork, error) { - var ( - err error - node *IncludedNetwork - ) - if len(inuo.hooks) == 0 { - node, err = inuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*IncludedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - inuo.mutation = mutation - node, err = inuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(inuo.hooks) - 1; i >= 0; i-- { - if inuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = inuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, inuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, inuo.sqlSave, inuo.mutation, inuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -696,16 +633,7 @@ func (inuo *IncludedNetworkUpdateOne) ExecX(ctx context.Context) { } func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *IncludedNetwork, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: includednetwork.Table, - Columns: includednetwork.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(includednetwork.Table, includednetwork.Columns, sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID)) id, ok := inuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "IncludedNetwork.id" for update`)} @@ -731,17 +659,14 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu } } if value, ok := inuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: includednetwork.FieldName, - }) + _spec.SetField(includednetwork.FieldName, field.TypeString, value) } if value, ok := inuo.mutation.Hosts(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: includednetwork.FieldHosts, + _spec.SetField(includednetwork.FieldHosts, field.TypeJSON, value) + } + if value, ok := inuo.mutation.AppendedHosts(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, includednetwork.FieldHosts, value) }) } if inuo.mutation.IncludedNetworkToTagCleared() { @@ -752,10 +677,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -768,10 +690,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -787,10 +706,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: []string{includednetwork.IncludedNetworkToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -806,10 +722,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -822,10 +735,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -841,10 +751,7 @@ func 
(inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: includednetwork.IncludedNetworkToHostPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -860,10 +767,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: []string{includednetwork.IncludedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -876,10 +780,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: []string{includednetwork.IncludedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -895,10 +796,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -911,10 +809,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -930,10 +825,7 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu Columns: includednetwork.IncludedNetworkToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -948,9 +840,10 @@ func (inuo *IncludedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Inclu if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{includednetwork.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + inuo.mutation.done = true return _node, nil } diff --git a/ent/migrate/migrate.go b/ent/migrate/migrate.go index 9bdaf523..1956a6bf 100755 --- a/ent/migrate/migrate.go +++ b/ent/migrate/migrate.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package migrate @@ -28,9 +28,6 @@ var ( // and therefore, it's recommended to enable this option to get more // flexibility in the schema changes. WithDropIndex = schema.WithDropIndex - // WithFixture sets the foreign-key renaming option to the migration when upgrading - // ent from v0.1.0 (issue-#285). Defaults to false. - WithFixture = schema.WithFixture // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. 
WithForeignKeys = schema.WithForeignKeys ) @@ -45,27 +42,23 @@ func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } // Create creates all schema resources. func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { migrate, err := schema.NewMigrate(s.drv, opts...) if err != nil { return fmt.Errorf("ent/migrate: %w", err) } - return migrate.Create(ctx, Tables...) + return migrate.Create(ctx, tables...) } // WriteTo writes the schema changes to w instead of running them against the database. // -// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { // log.Fatal(err) -// } -// +// } func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { - drv := &schema.WriteDriver{ - Writer: w, - Driver: s.drv, - } - migrate, err := schema.NewMigrate(drv, opts...) - if err != nil { - return fmt.Errorf("ent/migrate: %w", err) - } - return migrate.Create(ctx, Tables...) + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) } diff --git a/ent/migrate/schema.go b/ent/migrate/schema.go index 3f2e901c..a94502b8 100755 --- a/ent/migrate/schema.go +++ b/ent/migrate/schema.go @@ -1,8 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package migrate import ( + "entgo.io/ent/dialect/entsql" "entgo.io/ent/dialect/sql/schema" "entgo.io/ent/schema/field" ) @@ -1544,10 +1545,13 @@ func init() { CompetitionCompetitionToDNSTable.ForeignKeys[1].RefTable = DnSsTable EnvironmentEnvironmentToUserTable.ForeignKeys[0].RefTable = EnvironmentsTable EnvironmentEnvironmentToUserTable.ForeignKeys[1].RefTable = UsersTable + EnvironmentEnvironmentToUserTable.Annotation = &entsql.Annotation{} EnvironmentEnvironmentToIncludedNetworkTable.ForeignKeys[0].RefTable = EnvironmentsTable EnvironmentEnvironmentToIncludedNetworkTable.ForeignKeys[1].RefTable = IncludedNetworksTable + EnvironmentEnvironmentToIncludedNetworkTable.Annotation = &entsql.Annotation{} EnvironmentEnvironmentToDNSTable.ForeignKeys[0].RefTable = EnvironmentsTable EnvironmentEnvironmentToDNSTable.ForeignKeys[1].RefTable = DnSsTable + EnvironmentEnvironmentToDNSTable.Annotation = &entsql.Annotation{} IncludedNetworkIncludedNetworkToHostTable.ForeignKeys[0].RefTable = IncludedNetworksTable IncludedNetworkIncludedNetworkToHostTable.ForeignKeys[1].RefTable = HostsTable PlanNextPlanTable.ForeignKeys[0].RefTable = PlansTable diff --git a/ent/mutation.go b/ent/mutation.go index 4eb3ac70..167f16de 100755 --- a/ent/mutation.go +++ b/ent/mutation.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,8 @@ import ( "sync" "time" + "entgo.io/ent" + "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/adhocplan" "github.com/gen0cide/laforge/ent/agentstatus" "github.com/gen0cide/laforge/ent/agenttask" @@ -49,8 +51,6 @@ import ( "github.com/gen0cide/laforge/ent/user" "github.com/go-git/go-git/v5/plumbing/object" "github.com/google/uuid" - - "entgo.io/ent" ) const ( @@ -459,11 +459,26 @@ func (m *AdhocPlanMutation) Where(ps ...predicate.AdhocPlan) { m.predicates = append(m.predicates, ps...) 
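Editorial note (not part of the generated diff): the reworked ent/migrate package above routes both Schema.Create and Schema.WriteTo through a single Create helper and drops the WithFixture option. A sketch of a dry-run-then-apply migration, assuming the generated client exposes the usual Schema field.

package examples

import (
	"context"
	"log"
	"os"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/migrate"
)

// migrateSchema prints the pending DDL first, then applies it. WithFixture is
// gone after the regeneration; the remaining options pass straight through.
func migrateSchema(ctx context.Context, client *ent.Client) {
	if err := client.Schema.WriteTo(ctx, os.Stdout, migrate.WithDropIndex(true)); err != nil {
		log.Fatalf("printing schema changes: %v", err)
	}
	if err := client.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithForeignKeys(true)); err != nil {
		log.Fatalf("creating schema resources: %v", err)
	}
}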
} +// WhereP appends storage-level predicates to the AdhocPlanMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AdhocPlanMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AdhocPlan, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AdhocPlanMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AdhocPlanMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (AdhocPlan). func (m *AdhocPlanMutation) Type() string { return m.typ @@ -1681,11 +1696,26 @@ func (m *AgentStatusMutation) Where(ps ...predicate.AgentStatus) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the AgentStatusMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AgentStatusMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AgentStatus, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AgentStatusMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AgentStatusMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (AgentStatus). func (m *AgentStatusMutation) Type() string { return m.typ @@ -2180,8 +2210,6 @@ func (m *AgentStatusMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *AgentStatusMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -2751,11 +2779,26 @@ func (m *AgentTaskMutation) Where(ps ...predicate.AgentTask) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the AgentTaskMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AgentTaskMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AgentTask, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AgentTaskMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AgentTaskMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (AgentTask). func (m *AgentTaskMutation) Type() string { return m.typ @@ -3248,13 +3291,13 @@ func (m *AnsibleMutation) ResetName() { m.name = nil } -// SetHclID sets the "hcl_id" field. -func (m *AnsibleMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *AnsibleMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *AnsibleMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *AnsibleMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -3262,25 +3305,25 @@ func (m *AnsibleMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Ansible entity. +// OldHCLID returns the old "hcl_id" field's value of the Ansible entity. // If the Ansible object wasn't provided to the builder, the object is fetched from the database. 
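Editorial note (not part of the generated diff): WhereP, added to every mutation in these hunks, lets hooks scope mutations without importing the typed predicate packages. A sketch under those assumptions; the tenant_id column and the sql.FieldEQ helper usage are illustrative, not taken from this repository.

package examples

import (
	"context"

	entsql "entgo.io/ent/dialect/sql"
	"github.com/gen0cide/laforge/ent"
)

// scopeToTenant narrows every mutation that supports WhereP to one tenant by
// appending a raw storage-level predicate.
func scopeToTenant(tenantID string) ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			if mp, ok := m.(interface{ WhereP(...func(*entsql.Selector)) }); ok {
				mp.WhereP(entsql.FieldEQ("tenant_id", tenantID)) // hypothetical column
			}
			return next.Mutate(ctx, m)
		})
	}
}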
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AnsibleMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *AnsibleMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *AnsibleMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *AnsibleMutation) ResetHCLID() { m.hcl_id = nil } @@ -3634,11 +3677,26 @@ func (m *AnsibleMutation) Where(ps ...predicate.Ansible) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the AnsibleMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AnsibleMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Ansible, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AnsibleMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AnsibleMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Ansible). 
func (m *AnsibleMutation) Type() string { return m.typ @@ -3653,7 +3711,7 @@ func (m *AnsibleMutation) Fields() []string { fields = append(fields, ansible.FieldName) } if m.hcl_id != nil { - fields = append(fields, ansible.FieldHclID) + fields = append(fields, ansible.FieldHCLID) } if m.description != nil { fields = append(fields, ansible.FieldDescription) @@ -3686,8 +3744,8 @@ func (m *AnsibleMutation) Field(name string) (ent.Value, bool) { switch name { case ansible.FieldName: return m.Name() - case ansible.FieldHclID: - return m.HclID() + case ansible.FieldHCLID: + return m.HCLID() case ansible.FieldDescription: return m.Description() case ansible.FieldSource: @@ -3713,8 +3771,8 @@ func (m *AnsibleMutation) OldField(ctx context.Context, name string) (ent.Value, switch name { case ansible.FieldName: return m.OldName(ctx) - case ansible.FieldHclID: - return m.OldHclID(ctx) + case ansible.FieldHCLID: + return m.OldHCLID(ctx) case ansible.FieldDescription: return m.OldDescription(ctx) case ansible.FieldSource: @@ -3745,12 +3803,12 @@ func (m *AnsibleMutation) SetField(name string, value ent.Value) error { } m.SetName(v) return nil - case ansible.FieldHclID: + case ansible.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case ansible.FieldDescription: v, ok := value.(string) @@ -3853,8 +3911,8 @@ func (m *AnsibleMutation) ResetField(name string) error { case ansible.FieldName: m.ResetName() return nil - case ansible.FieldHclID: - m.ResetHclID() + case ansible.FieldHCLID: + m.ResetHCLID() return nil case ansible.FieldDescription: m.ResetDescription() @@ -4625,11 +4683,26 @@ func (m *AuthUserMutation) Where(ps ...predicate.AuthUser) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the AuthUserMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AuthUserMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AuthUser, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AuthUserMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AuthUserMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (AuthUser). func (m *AuthUserMutation) Type() string { return m.typ @@ -5931,11 +6004,26 @@ func (m *BuildMutation) Where(ps ...predicate.Build) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the BuildMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BuildMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Build, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *BuildMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *BuildMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Build). func (m *BuildMutation) Type() string { return m.typ @@ -6895,11 +6983,26 @@ func (m *BuildCommitMutation) Where(ps ...predicate.BuildCommit) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the BuildCommitMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BuildCommitMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.BuildCommit, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *BuildCommitMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *BuildCommitMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (BuildCommit). func (m *BuildCommitMutation) Type() string { return m.typ @@ -7211,6 +7314,7 @@ type CommandMutation struct { description *string program *string args *[]string + appendargs []string ignore_errors *bool disabled *bool cooldown *int @@ -7334,13 +7438,13 @@ func (m *CommandMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *CommandMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *CommandMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *CommandMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *CommandMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -7348,25 +7452,25 @@ func (m *CommandMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Command entity. +// OldHCLID returns the old "hcl_id" field's value of the Command entity. // If the Command object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *CommandMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *CommandMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *CommandMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *CommandMutation) ResetHCLID() { m.hcl_id = nil } @@ -7481,6 +7585,7 @@ func (m *CommandMutation) ResetProgram() { // SetArgs sets the "args" field. func (m *CommandMutation) SetArgs(s []string) { m.args = &s + m.appendargs = nil } // Args returns the value of the "args" field in the mutation. @@ -7509,9 +7614,23 @@ func (m *CommandMutation) OldArgs(ctx context.Context) (v []string, err error) { return oldValue.Args, nil } +// AppendArgs adds s to the "args" field. +func (m *CommandMutation) AppendArgs(s []string) { + m.appendargs = append(m.appendargs, s...) +} + +// AppendedArgs returns the list of values that were appended to the "args" field in this mutation. 
+func (m *CommandMutation) AppendedArgs() ([]string, bool) { + if len(m.appendargs) == 0 { + return nil, false + } + return m.appendargs, true +} + // ResetArgs resets all changes to the "args" field. func (m *CommandMutation) ResetArgs() { m.args = nil + m.appendargs = nil } // SetIgnoreErrors sets the "ignore_errors" field. @@ -7868,11 +7987,26 @@ func (m *CommandMutation) Where(ps ...predicate.Command) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the CommandMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *CommandMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Command, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *CommandMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *CommandMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Command). func (m *CommandMutation) Type() string { return m.typ @@ -7884,7 +8018,7 @@ func (m *CommandMutation) Type() string { func (m *CommandMutation) Fields() []string { fields := make([]string, 0, 11) if m.hcl_id != nil { - fields = append(fields, command.FieldHclID) + fields = append(fields, command.FieldHCLID) } if m.name != nil { fields = append(fields, command.FieldName) @@ -7924,8 +8058,8 @@ func (m *CommandMutation) Fields() []string { // schema. func (m *CommandMutation) Field(name string) (ent.Value, bool) { switch name { - case command.FieldHclID: - return m.HclID() + case command.FieldHCLID: + return m.HCLID() case command.FieldName: return m.Name() case command.FieldDescription: @@ -7955,8 +8089,8 @@ func (m *CommandMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *CommandMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case command.FieldHclID: - return m.OldHclID(ctx) + case command.FieldHCLID: + return m.OldHCLID(ctx) case command.FieldName: return m.OldName(ctx) case command.FieldDescription: @@ -7986,12 +8120,12 @@ func (m *CommandMutation) OldField(ctx context.Context, name string) (ent.Value, // type. func (m *CommandMutation) SetField(name string, value ent.Value) error { switch name { - case command.FieldHclID: + case command.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case command.FieldName: v, ok := value.(string) @@ -8139,8 +8273,8 @@ func (m *CommandMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *CommandMutation) ResetField(name string) error { switch name { - case command.FieldHclID: - m.ResetHclID() + case command.FieldHCLID: + m.ResetHCLID() return nil case command.FieldName: m.ResetName() @@ -8406,13 +8540,13 @@ func (m *CompetitionMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *CompetitionMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *CompetitionMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *CompetitionMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. 
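Editorial note (not part of the generated diff): AppendArgs/AppendedArgs, added to CommandMutation above, give mutations an append-only view of the JSON "args" column. A small hook sketch (illustrative only) that inspects it before the mutation runs.

package examples

import (
	"context"
	"log"

	"github.com/gen0cide/laforge/ent"
)

// logAppendedArgs reports when a Command mutation appends to "args" instead
// of replacing it; AppendedArgs returns only the values added via AppendArgs.
func logAppendedArgs(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		if cm, ok := m.(*ent.CommandMutation); ok {
			if added, ok := cm.AppendedArgs(); ok {
				log.Printf("appending %d arg(s) to command", len(added))
			}
		}
		return next.Mutate(ctx, m)
	})
}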
+func (m *CompetitionMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -8420,25 +8554,25 @@ func (m *CompetitionMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Competition entity. +// OldHCLID returns the old "hcl_id" field's value of the Competition entity. // If the Competition object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *CompetitionMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *CompetitionMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *CompetitionMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *CompetitionMutation) ResetHCLID() { m.hcl_id = nil } @@ -8702,11 +8836,26 @@ func (m *CompetitionMutation) Where(ps ...predicate.Competition) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the CompetitionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *CompetitionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Competition, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *CompetitionMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *CompetitionMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Competition). func (m *CompetitionMutation) Type() string { return m.typ @@ -8718,7 +8867,7 @@ func (m *CompetitionMutation) Type() string { func (m *CompetitionMutation) Fields() []string { fields := make([]string, 0, 4) if m.hcl_id != nil { - fields = append(fields, competition.FieldHclID) + fields = append(fields, competition.FieldHCLID) } if m.root_password != nil { fields = append(fields, competition.FieldRootPassword) @@ -8737,8 +8886,8 @@ func (m *CompetitionMutation) Fields() []string { // schema. func (m *CompetitionMutation) Field(name string) (ent.Value, bool) { switch name { - case competition.FieldHclID: - return m.HclID() + case competition.FieldHCLID: + return m.HCLID() case competition.FieldRootPassword: return m.RootPassword() case competition.FieldConfig: @@ -8754,8 +8903,8 @@ func (m *CompetitionMutation) Field(name string) (ent.Value, bool) { // database failed. 
func (m *CompetitionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case competition.FieldHclID: - return m.OldHclID(ctx) + case competition.FieldHCLID: + return m.OldHCLID(ctx) case competition.FieldRootPassword: return m.OldRootPassword(ctx) case competition.FieldConfig: @@ -8771,12 +8920,12 @@ func (m *CompetitionMutation) OldField(ctx context.Context, name string) (ent.Va // type. func (m *CompetitionMutation) SetField(name string, value ent.Value) error { switch name { - case competition.FieldHclID: + case competition.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case competition.FieldRootPassword: v, ok := value.(string) @@ -8848,8 +8997,8 @@ func (m *CompetitionMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *CompetitionMutation) ResetField(name string) error { switch name { - case competition.FieldHclID: - m.ResetHclID() + case competition.FieldHCLID: + m.ResetHCLID() return nil case competition.FieldRootPassword: m.ResetRootPassword() @@ -9002,7 +9151,9 @@ type DNSMutation struct { _type *string root_domain *string dns_servers *[]string + appenddns_servers []string ntp_servers *[]string + appendntp_servers []string _config *map[string]string clearedFields map[string]struct{} _DNSToEnvironment map[uuid.UUID]struct{} @@ -9120,13 +9271,13 @@ func (m *DNSMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *DNSMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *DNSMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *DNSMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *DNSMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -9134,25 +9285,25 @@ func (m *DNSMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the DNS entity. +// OldHCLID returns the old "hcl_id" field's value of the DNS entity. // If the DNS object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DNSMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *DNSMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *DNSMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *DNSMutation) ResetHCLID() { m.hcl_id = nil } @@ -9231,6 +9382,7 @@ func (m *DNSMutation) ResetRootDomain() { // SetDNSServers sets the "dns_servers" field. 
func (m *DNSMutation) SetDNSServers(s []string) { m.dns_servers = &s + m.appenddns_servers = nil } // DNSServers returns the value of the "dns_servers" field in the mutation. @@ -9259,14 +9411,29 @@ func (m *DNSMutation) OldDNSServers(ctx context.Context) (v []string, err error) return oldValue.DNSServers, nil } +// AppendDNSServers adds s to the "dns_servers" field. +func (m *DNSMutation) AppendDNSServers(s []string) { + m.appenddns_servers = append(m.appenddns_servers, s...) +} + +// AppendedDNSServers returns the list of values that were appended to the "dns_servers" field in this mutation. +func (m *DNSMutation) AppendedDNSServers() ([]string, bool) { + if len(m.appenddns_servers) == 0 { + return nil, false + } + return m.appenddns_servers, true +} + // ResetDNSServers resets all changes to the "dns_servers" field. func (m *DNSMutation) ResetDNSServers() { m.dns_servers = nil + m.appenddns_servers = nil } // SetNtpServers sets the "ntp_servers" field. func (m *DNSMutation) SetNtpServers(s []string) { m.ntp_servers = &s + m.appendntp_servers = nil } // NtpServers returns the value of the "ntp_servers" field in the mutation. @@ -9295,9 +9462,23 @@ func (m *DNSMutation) OldNtpServers(ctx context.Context) (v []string, err error) return oldValue.NtpServers, nil } +// AppendNtpServers adds s to the "ntp_servers" field. +func (m *DNSMutation) AppendNtpServers(s []string) { + m.appendntp_servers = append(m.appendntp_servers, s...) +} + +// AppendedNtpServers returns the list of values that were appended to the "ntp_servers" field in this mutation. +func (m *DNSMutation) AppendedNtpServers() ([]string, bool) { + if len(m.appendntp_servers) == 0 { + return nil, false + } + return m.appendntp_servers, true +} + // ResetNtpServers resets all changes to the "ntp_servers" field. func (m *DNSMutation) ResetNtpServers() { m.ntp_servers = nil + m.appendntp_servers = nil } // SetConfig sets the "config" field. @@ -9449,11 +9630,26 @@ func (m *DNSMutation) Where(ps ...predicate.DNS) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the DNSMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DNSMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DNS, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *DNSMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *DNSMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (DNS). func (m *DNSMutation) Type() string { return m.typ @@ -9465,7 +9661,7 @@ func (m *DNSMutation) Type() string { func (m *DNSMutation) Fields() []string { fields := make([]string, 0, 6) if m.hcl_id != nil { - fields = append(fields, dns.FieldHclID) + fields = append(fields, dns.FieldHCLID) } if m._type != nil { fields = append(fields, dns.FieldType) @@ -9490,8 +9686,8 @@ func (m *DNSMutation) Fields() []string { // schema. func (m *DNSMutation) Field(name string) (ent.Value, bool) { switch name { - case dns.FieldHclID: - return m.HclID() + case dns.FieldHCLID: + return m.HCLID() case dns.FieldType: return m.GetType() case dns.FieldRootDomain: @@ -9511,8 +9707,8 @@ func (m *DNSMutation) Field(name string) (ent.Value, bool) { // database failed. 
func (m *DNSMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case dns.FieldHclID: - return m.OldHclID(ctx) + case dns.FieldHCLID: + return m.OldHCLID(ctx) case dns.FieldType: return m.OldType(ctx) case dns.FieldRootDomain: @@ -9532,12 +9728,12 @@ func (m *DNSMutation) OldField(ctx context.Context, name string) (ent.Value, err // type. func (m *DNSMutation) SetField(name string, value ent.Value) error { switch name { - case dns.FieldHclID: + case dns.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case dns.FieldType: v, ok := value.(string) @@ -9623,8 +9819,8 @@ func (m *DNSMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *DNSMutation) ResetField(name string) error { switch name { - case dns.FieldHclID: - m.ResetHclID() + case dns.FieldHCLID: + m.ResetHCLID() return nil case dns.FieldType: m.ResetType() @@ -9764,6 +9960,7 @@ type DNSRecordMutation struct { hcl_id *string name *string values *[]string + appendvalues []string _type *string zone *string vars *map[string]string @@ -9881,13 +10078,13 @@ func (m *DNSRecordMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *DNSRecordMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *DNSRecordMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *DNSRecordMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *DNSRecordMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -9895,25 +10092,25 @@ func (m *DNSRecordMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the DNSRecord entity. +// OldHCLID returns the old "hcl_id" field's value of the DNSRecord entity. // If the DNSRecord object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DNSRecordMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *DNSRecordMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *DNSRecordMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *DNSRecordMutation) ResetHCLID() { m.hcl_id = nil } @@ -9956,6 +10153,7 @@ func (m *DNSRecordMutation) ResetName() { // SetValues sets the "values" field. func (m *DNSRecordMutation) SetValues(s []string) { m.values = &s + m.appendvalues = nil } // Values returns the value of the "values" field in the mutation. 
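The hunks above and below add two recurring APIs to every mutation type: Append<Field>/Appended<Field> accessors for JSON list fields (for example dns_servers and values) and a WhereP helper that accepts storage-level predicates. The following is a minimal usage sketch, not part of the generated diff, assuming the standard generated aliases (ent.Hook, ent.Mutator, ent.MutateFunc) and laforge's module path github.com/gen0cide/laforge/ent as used elsewhere in this change; the hooks package and function names are hypothetical.

package hooks // hypothetical helper package, not part of this diff

import (
	"context"
	"fmt"

	"entgo.io/ent/dialect/sql"

	"github.com/gen0cide/laforge/ent"
)

// LogAppendedDNSServers logs values staged via AppendDNSServers before the
// mutation is applied; AppendedDNSServers reports (nil, false) when nothing
// was appended in this mutation.
func LogAppendedDNSServers(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		if dm, ok := m.(*ent.DNSMutation); ok {
			if servers, appended := dm.AppendedDNSServers(); appended {
				fmt.Printf("appending dns_servers: %v\n", servers)
			}
		}
		return next.Mutate(ctx, m)
	})
}

// RestrictToHCLID narrows any mutation that exposes WhereP to rows whose
// hcl_id column matches the given value, without importing ent/predicate.
func RestrictToHCLID(hclID string) ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			if wp, ok := m.(interface{ WhereP(...func(*sql.Selector)) }); ok {
				wp.WhereP(func(s *sql.Selector) {
					s.Where(sql.EQ(s.C("hcl_id"), hclID))
				})
			}
			return next.Mutate(ctx, m)
		})
	}
}

The interface assertion on WhereP is what the generated comment means by predicates that "do not depend on any generated package": the hook compiles against plain func(*sql.Selector) values and never imports the generated predicate package.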
@@ -9984,9 +10182,23 @@ func (m *DNSRecordMutation) OldValues(ctx context.Context) (v []string, err erro return oldValue.Values, nil } +// AppendValues adds s to the "values" field. +func (m *DNSRecordMutation) AppendValues(s []string) { + m.appendvalues = append(m.appendvalues, s...) +} + +// AppendedValues returns the list of values that were appended to the "values" field in this mutation. +func (m *DNSRecordMutation) AppendedValues() ([]string, bool) { + if len(m.appendvalues) == 0 { + return nil, false + } + return m.appendvalues, true +} + // ResetValues resets all changes to the "values" field. func (m *DNSRecordMutation) ResetValues() { m.values = nil + m.appendvalues = nil } // SetType sets the "type" field. @@ -10213,11 +10425,26 @@ func (m *DNSRecordMutation) Where(ps ...predicate.DNSRecord) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the DNSRecordMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DNSRecordMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DNSRecord, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *DNSRecordMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *DNSRecordMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (DNSRecord). func (m *DNSRecordMutation) Type() string { return m.typ @@ -10229,7 +10456,7 @@ func (m *DNSRecordMutation) Type() string { func (m *DNSRecordMutation) Fields() []string { fields := make([]string, 0, 8) if m.hcl_id != nil { - fields = append(fields, dnsrecord.FieldHclID) + fields = append(fields, dnsrecord.FieldHCLID) } if m.name != nil { fields = append(fields, dnsrecord.FieldName) @@ -10260,8 +10487,8 @@ func (m *DNSRecordMutation) Fields() []string { // schema. func (m *DNSRecordMutation) Field(name string) (ent.Value, bool) { switch name { - case dnsrecord.FieldHclID: - return m.HclID() + case dnsrecord.FieldHCLID: + return m.HCLID() case dnsrecord.FieldName: return m.Name() case dnsrecord.FieldValues: @@ -10285,8 +10512,8 @@ func (m *DNSRecordMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *DNSRecordMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case dnsrecord.FieldHclID: - return m.OldHclID(ctx) + case dnsrecord.FieldHCLID: + return m.OldHCLID(ctx) case dnsrecord.FieldName: return m.OldName(ctx) case dnsrecord.FieldValues: @@ -10310,12 +10537,12 @@ func (m *DNSRecordMutation) OldField(ctx context.Context, name string) (ent.Valu // type. func (m *DNSRecordMutation) SetField(name string, value ent.Value) error { switch name { - case dnsrecord.FieldHclID: + case dnsrecord.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case dnsrecord.FieldName: v, ok := value.(string) @@ -10415,8 +10642,8 @@ func (m *DNSRecordMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. 
func (m *DNSRecordMutation) ResetField(name string) error { switch name { - case dnsrecord.FieldHclID: - m.ResetHclID() + case dnsrecord.FieldHCLID: + m.ResetHCLID() return nil case dnsrecord.FieldName: m.ResetName() @@ -10473,8 +10700,6 @@ func (m *DNSRecordMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *DNSRecordMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -10739,11 +10964,26 @@ func (m *DiskMutation) Where(ps ...predicate.Disk) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the DiskMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DiskMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Disk, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *DiskMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *DiskMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Disk). func (m *DiskMutation) Type() string { return m.typ @@ -10895,8 +11135,6 @@ func (m *DiskMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *DiskMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -10957,7 +11195,9 @@ type EnvironmentMutation struct { revision *int addrevision *int admin_cidrs *[]string + appendadmin_cidrs []string exposed_vdi_ports *[]string + appendexposed_vdi_ports []string _config *map[string]string tags *map[string]string clearedFields map[string]struct{} @@ -11127,13 +11367,13 @@ func (m *EnvironmentMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *EnvironmentMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *EnvironmentMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *EnvironmentMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *EnvironmentMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -11141,25 +11381,25 @@ func (m *EnvironmentMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Environment entity. +// OldHCLID returns the old "hcl_id" field's value of the Environment entity. // If the Environment object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *EnvironmentMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *EnvironmentMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *EnvironmentMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *EnvironmentMutation) ResetHCLID() { m.hcl_id = nil } @@ -11422,6 +11662,7 @@ func (m *EnvironmentMutation) ResetRevision() { // SetAdminCidrs sets the "admin_cidrs" field. func (m *EnvironmentMutation) SetAdminCidrs(s []string) { m.admin_cidrs = &s + m.appendadmin_cidrs = nil } // AdminCidrs returns the value of the "admin_cidrs" field in the mutation. @@ -11450,14 +11691,29 @@ func (m *EnvironmentMutation) OldAdminCidrs(ctx context.Context) (v []string, er return oldValue.AdminCidrs, nil } +// AppendAdminCidrs adds s to the "admin_cidrs" field. +func (m *EnvironmentMutation) AppendAdminCidrs(s []string) { + m.appendadmin_cidrs = append(m.appendadmin_cidrs, s...) +} + +// AppendedAdminCidrs returns the list of values that were appended to the "admin_cidrs" field in this mutation. +func (m *EnvironmentMutation) AppendedAdminCidrs() ([]string, bool) { + if len(m.appendadmin_cidrs) == 0 { + return nil, false + } + return m.appendadmin_cidrs, true +} + // ResetAdminCidrs resets all changes to the "admin_cidrs" field. func (m *EnvironmentMutation) ResetAdminCidrs() { m.admin_cidrs = nil + m.appendadmin_cidrs = nil } // SetExposedVdiPorts sets the "exposed_vdi_ports" field. func (m *EnvironmentMutation) SetExposedVdiPorts(s []string) { m.exposed_vdi_ports = &s + m.appendexposed_vdi_ports = nil } // ExposedVdiPorts returns the value of the "exposed_vdi_ports" field in the mutation. @@ -11486,9 +11742,23 @@ func (m *EnvironmentMutation) OldExposedVdiPorts(ctx context.Context) (v []strin return oldValue.ExposedVdiPorts, nil } +// AppendExposedVdiPorts adds s to the "exposed_vdi_ports" field. +func (m *EnvironmentMutation) AppendExposedVdiPorts(s []string) { + m.appendexposed_vdi_ports = append(m.appendexposed_vdi_ports, s...) +} + +// AppendedExposedVdiPorts returns the list of values that were appended to the "exposed_vdi_ports" field in this mutation. +func (m *EnvironmentMutation) AppendedExposedVdiPorts() ([]string, bool) { + if len(m.appendexposed_vdi_ports) == 0 { + return nil, false + } + return m.appendexposed_vdi_ports, true +} + // ResetExposedVdiPorts resets all changes to the "exposed_vdi_ports" field. func (m *EnvironmentMutation) ResetExposedVdiPorts() { m.exposed_vdi_ports = nil + m.appendexposed_vdi_ports = nil } // SetConfig sets the "config" field. @@ -12594,11 +12864,26 @@ func (m *EnvironmentMutation) Where(ps ...predicate.Environment) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the EnvironmentMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *EnvironmentMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Environment, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *EnvironmentMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *EnvironmentMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Environment). func (m *EnvironmentMutation) Type() string { return m.typ @@ -12610,7 +12895,7 @@ func (m *EnvironmentMutation) Type() string { func (m *EnvironmentMutation) Fields() []string { fields := make([]string, 0, 11) if m.hcl_id != nil { - fields = append(fields, environment.FieldHclID) + fields = append(fields, environment.FieldHCLID) } if m.competition_id != nil { fields = append(fields, environment.FieldCompetitionID) @@ -12650,8 +12935,8 @@ func (m *EnvironmentMutation) Fields() []string { // schema. func (m *EnvironmentMutation) Field(name string) (ent.Value, bool) { switch name { - case environment.FieldHclID: - return m.HclID() + case environment.FieldHCLID: + return m.HCLID() case environment.FieldCompetitionID: return m.CompetitionID() case environment.FieldName: @@ -12681,8 +12966,8 @@ func (m *EnvironmentMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *EnvironmentMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case environment.FieldHclID: - return m.OldHclID(ctx) + case environment.FieldHCLID: + return m.OldHCLID(ctx) case environment.FieldCompetitionID: return m.OldCompetitionID(ctx) case environment.FieldName: @@ -12712,12 +12997,12 @@ func (m *EnvironmentMutation) OldField(ctx context.Context, name string) (ent.Va // type. func (m *EnvironmentMutation) SetField(name string, value ent.Value) error { switch name { - case environment.FieldHclID: + case environment.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case environment.FieldCompetitionID: v, ok := value.(string) @@ -12865,8 +13150,8 @@ func (m *EnvironmentMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *EnvironmentMutation) ResetField(name string) error { switch name { - case environment.FieldHclID: - m.ResetHclID() + case environment.FieldHCLID: + m.ResetHCLID() return nil case environment.FieldCompetitionID: m.ResetCompetitionID() @@ -13575,13 +13860,13 @@ func (m *FileDeleteMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *FileDeleteMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *FileDeleteMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *FileDeleteMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *FileDeleteMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -13589,25 +13874,25 @@ func (m *FileDeleteMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the FileDelete entity. +// OldHCLID returns the old "hcl_id" field's value of the FileDelete entity. 
// If the FileDelete object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *FileDeleteMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *FileDeleteMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *FileDeleteMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *FileDeleteMutation) ResetHCLID() { m.hcl_id = nil } @@ -13727,11 +14012,26 @@ func (m *FileDeleteMutation) Where(ps ...predicate.FileDelete) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the FileDeleteMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *FileDeleteMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.FileDelete, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *FileDeleteMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *FileDeleteMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (FileDelete). func (m *FileDeleteMutation) Type() string { return m.typ @@ -13743,7 +14043,7 @@ func (m *FileDeleteMutation) Type() string { func (m *FileDeleteMutation) Fields() []string { fields := make([]string, 0, 3) if m.hcl_id != nil { - fields = append(fields, filedelete.FieldHclID) + fields = append(fields, filedelete.FieldHCLID) } if m._path != nil { fields = append(fields, filedelete.FieldPath) @@ -13759,8 +14059,8 @@ func (m *FileDeleteMutation) Fields() []string { // schema. func (m *FileDeleteMutation) Field(name string) (ent.Value, bool) { switch name { - case filedelete.FieldHclID: - return m.HclID() + case filedelete.FieldHCLID: + return m.HCLID() case filedelete.FieldPath: return m.Path() case filedelete.FieldTags: @@ -13774,8 +14074,8 @@ func (m *FileDeleteMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *FileDeleteMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case filedelete.FieldHclID: - return m.OldHclID(ctx) + case filedelete.FieldHCLID: + return m.OldHCLID(ctx) case filedelete.FieldPath: return m.OldPath(ctx) case filedelete.FieldTags: @@ -13789,12 +14089,12 @@ func (m *FileDeleteMutation) OldField(ctx context.Context, name string) (ent.Val // type. 
func (m *FileDeleteMutation) SetField(name string, value ent.Value) error { switch name { - case filedelete.FieldHclID: + case filedelete.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case filedelete.FieldPath: v, ok := value.(string) @@ -13859,8 +14159,8 @@ func (m *FileDeleteMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *FileDeleteMutation) ResetField(name string) error { switch name { - case filedelete.FieldHclID: - m.ResetHclID() + case filedelete.FieldHCLID: + m.ResetHCLID() return nil case filedelete.FieldPath: m.ResetPath() @@ -13902,8 +14202,6 @@ func (m *FileDeleteMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *FileDeleteMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -14077,13 +14375,13 @@ func (m *FileDownloadMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *FileDownloadMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *FileDownloadMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *FileDownloadMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *FileDownloadMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -14091,25 +14389,25 @@ func (m *FileDownloadMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the FileDownload entity. +// OldHCLID returns the old "hcl_id" field's value of the FileDownload entity. // If the FileDownload object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *FileDownloadMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *FileDownloadMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *FileDownloadMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *FileDownloadMutation) ResetHCLID() { m.hcl_id = nil } @@ -14517,11 +14815,26 @@ func (m *FileDownloadMutation) Where(ps ...predicate.FileDownload) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the FileDownloadMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. 
+func (m *FileDownloadMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.FileDownload, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *FileDownloadMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *FileDownloadMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (FileDownload). func (m *FileDownloadMutation) Type() string { return m.typ @@ -14533,7 +14846,7 @@ func (m *FileDownloadMutation) Type() string { func (m *FileDownloadMutation) Fields() []string { fields := make([]string, 0, 11) if m.hcl_id != nil { - fields = append(fields, filedownload.FieldHclID) + fields = append(fields, filedownload.FieldHCLID) } if m.source_type != nil { fields = append(fields, filedownload.FieldSourceType) @@ -14573,8 +14886,8 @@ func (m *FileDownloadMutation) Fields() []string { // schema. func (m *FileDownloadMutation) Field(name string) (ent.Value, bool) { switch name { - case filedownload.FieldHclID: - return m.HclID() + case filedownload.FieldHCLID: + return m.HCLID() case filedownload.FieldSourceType: return m.SourceType() case filedownload.FieldSource: @@ -14604,8 +14917,8 @@ func (m *FileDownloadMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *FileDownloadMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case filedownload.FieldHclID: - return m.OldHclID(ctx) + case filedownload.FieldHCLID: + return m.OldHCLID(ctx) case filedownload.FieldSourceType: return m.OldSourceType(ctx) case filedownload.FieldSource: @@ -14635,12 +14948,12 @@ func (m *FileDownloadMutation) OldField(ctx context.Context, name string) (ent.V // type. func (m *FileDownloadMutation) SetField(name string, value ent.Value) error { switch name { - case filedownload.FieldHclID: + case filedownload.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case filedownload.FieldSourceType: v, ok := value.(string) @@ -14761,8 +15074,8 @@ func (m *FileDownloadMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *FileDownloadMutation) ResetField(name string) error { switch name { - case filedownload.FieldHclID: - m.ResetHclID() + case filedownload.FieldHCLID: + m.ResetHCLID() return nil case filedownload.FieldSourceType: m.ResetSourceType() @@ -14828,8 +15141,6 @@ func (m *FileDownloadMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *FileDownloadMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -14997,13 +15308,13 @@ func (m *FileExtractMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *FileExtractMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *FileExtractMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *FileExtractMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. 
+func (m *FileExtractMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -15011,25 +15322,25 @@ func (m *FileExtractMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the FileExtract entity. +// OldHCLID returns the old "hcl_id" field's value of the FileExtract entity. // If the FileExtract object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *FileExtractMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *FileExtractMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *FileExtractMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *FileExtractMutation) ResetHCLID() { m.hcl_id = nil } @@ -15221,11 +15532,26 @@ func (m *FileExtractMutation) Where(ps ...predicate.FileExtract) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the FileExtractMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *FileExtractMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.FileExtract, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *FileExtractMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *FileExtractMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (FileExtract). func (m *FileExtractMutation) Type() string { return m.typ @@ -15237,7 +15563,7 @@ func (m *FileExtractMutation) Type() string { func (m *FileExtractMutation) Fields() []string { fields := make([]string, 0, 5) if m.hcl_id != nil { - fields = append(fields, fileextract.FieldHclID) + fields = append(fields, fileextract.FieldHCLID) } if m.source != nil { fields = append(fields, fileextract.FieldSource) @@ -15259,8 +15585,8 @@ func (m *FileExtractMutation) Fields() []string { // schema. func (m *FileExtractMutation) Field(name string) (ent.Value, bool) { switch name { - case fileextract.FieldHclID: - return m.HclID() + case fileextract.FieldHCLID: + return m.HCLID() case fileextract.FieldSource: return m.Source() case fileextract.FieldDestination: @@ -15278,8 +15604,8 @@ func (m *FileExtractMutation) Field(name string) (ent.Value, bool) { // database failed. 
func (m *FileExtractMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case fileextract.FieldHclID: - return m.OldHclID(ctx) + case fileextract.FieldHCLID: + return m.OldHCLID(ctx) case fileextract.FieldSource: return m.OldSource(ctx) case fileextract.FieldDestination: @@ -15297,12 +15623,12 @@ func (m *FileExtractMutation) OldField(ctx context.Context, name string) (ent.Va // type. func (m *FileExtractMutation) SetField(name string, value ent.Value) error { switch name { - case fileextract.FieldHclID: + case fileextract.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case fileextract.FieldSource: v, ok := value.(string) @@ -15381,8 +15707,8 @@ func (m *FileExtractMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *FileExtractMutation) ResetField(name string) error { switch name { - case fileextract.FieldHclID: - m.ResetHclID() + case fileextract.FieldHCLID: + m.ResetHCLID() return nil case fileextract.FieldSource: m.ResetSource() @@ -15430,8 +15756,6 @@ func (m *FileExtractMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *FileExtractMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -15962,11 +16286,26 @@ func (m *FindingMutation) Where(ps ...predicate.Finding) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the FindingMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *FindingMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Finding, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *FindingMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *FindingMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Finding). func (m *FindingMutation) Type() string { return m.typ @@ -16593,11 +16932,26 @@ func (m *GinFileMiddlewareMutation) Where(ps ...predicate.GinFileMiddleware) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the GinFileMiddlewareMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *GinFileMiddlewareMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.GinFileMiddleware, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *GinFileMiddlewareMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *GinFileMiddlewareMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (GinFileMiddleware). func (m *GinFileMiddlewareMutation) Type() string { return m.typ @@ -16775,8 +17129,6 @@ func (m *GinFileMiddlewareMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. 
func (m *GinFileMiddlewareMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -16847,11 +17199,15 @@ type HostMutation struct { instance_size *string allow_mac_changes *bool exposed_tcp_ports *[]string + appendexposed_tcp_ports []string exposed_udp_ports *[]string + appendexposed_udp_ports []string override_password *string vars *map[string]string user_groups *[]string + appenduser_groups []string provision_steps *[]string + appendprovision_steps []string tags *map[string]string clearedFields map[string]struct{} _HostToDisk *uuid.UUID @@ -16979,13 +17335,13 @@ func (m *HostMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *HostMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *HostMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *HostMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *HostMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -16993,25 +17349,25 @@ func (m *HostMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Host entity. +// OldHCLID returns the old "hcl_id" field's value of the Host entity. // If the Host object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *HostMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *HostMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *HostMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *HostMutation) ResetHCLID() { m.hcl_id = nil } @@ -17254,6 +17610,7 @@ func (m *HostMutation) ResetAllowMACChanges() { // SetExposedTCPPorts sets the "exposed_tcp_ports" field. func (m *HostMutation) SetExposedTCPPorts(s []string) { m.exposed_tcp_ports = &s + m.appendexposed_tcp_ports = nil } // ExposedTCPPorts returns the value of the "exposed_tcp_ports" field in the mutation. @@ -17282,14 +17639,29 @@ func (m *HostMutation) OldExposedTCPPorts(ctx context.Context) (v []string, err return oldValue.ExposedTCPPorts, nil } +// AppendExposedTCPPorts adds s to the "exposed_tcp_ports" field. +func (m *HostMutation) AppendExposedTCPPorts(s []string) { + m.appendexposed_tcp_ports = append(m.appendexposed_tcp_ports, s...) +} + +// AppendedExposedTCPPorts returns the list of values that were appended to the "exposed_tcp_ports" field in this mutation. 
+func (m *HostMutation) AppendedExposedTCPPorts() ([]string, bool) { + if len(m.appendexposed_tcp_ports) == 0 { + return nil, false + } + return m.appendexposed_tcp_ports, true +} + // ResetExposedTCPPorts resets all changes to the "exposed_tcp_ports" field. func (m *HostMutation) ResetExposedTCPPorts() { m.exposed_tcp_ports = nil + m.appendexposed_tcp_ports = nil } // SetExposedUDPPorts sets the "exposed_udp_ports" field. func (m *HostMutation) SetExposedUDPPorts(s []string) { m.exposed_udp_ports = &s + m.appendexposed_udp_ports = nil } // ExposedUDPPorts returns the value of the "exposed_udp_ports" field in the mutation. @@ -17318,9 +17690,23 @@ func (m *HostMutation) OldExposedUDPPorts(ctx context.Context) (v []string, err return oldValue.ExposedUDPPorts, nil } +// AppendExposedUDPPorts adds s to the "exposed_udp_ports" field. +func (m *HostMutation) AppendExposedUDPPorts(s []string) { + m.appendexposed_udp_ports = append(m.appendexposed_udp_ports, s...) +} + +// AppendedExposedUDPPorts returns the list of values that were appended to the "exposed_udp_ports" field in this mutation. +func (m *HostMutation) AppendedExposedUDPPorts() ([]string, bool) { + if len(m.appendexposed_udp_ports) == 0 { + return nil, false + } + return m.appendexposed_udp_ports, true +} + // ResetExposedUDPPorts resets all changes to the "exposed_udp_ports" field. func (m *HostMutation) ResetExposedUDPPorts() { m.exposed_udp_ports = nil + m.appendexposed_udp_ports = nil } // SetOverridePassword sets the "override_password" field. @@ -17398,6 +17784,7 @@ func (m *HostMutation) ResetVars() { // SetUserGroups sets the "user_groups" field. func (m *HostMutation) SetUserGroups(s []string) { m.user_groups = &s + m.appenduser_groups = nil } // UserGroups returns the value of the "user_groups" field in the mutation. @@ -17426,14 +17813,29 @@ func (m *HostMutation) OldUserGroups(ctx context.Context) (v []string, err error return oldValue.UserGroups, nil } +// AppendUserGroups adds s to the "user_groups" field. +func (m *HostMutation) AppendUserGroups(s []string) { + m.appenduser_groups = append(m.appenduser_groups, s...) +} + +// AppendedUserGroups returns the list of values that were appended to the "user_groups" field in this mutation. +func (m *HostMutation) AppendedUserGroups() ([]string, bool) { + if len(m.appenduser_groups) == 0 { + return nil, false + } + return m.appenduser_groups, true +} + // ResetUserGroups resets all changes to the "user_groups" field. func (m *HostMutation) ResetUserGroups() { m.user_groups = nil + m.appenduser_groups = nil } // SetProvisionSteps sets the "provision_steps" field. func (m *HostMutation) SetProvisionSteps(s []string) { m.provision_steps = &s + m.appendprovision_steps = nil } // ProvisionSteps returns the value of the "provision_steps" field in the mutation. @@ -17462,9 +17864,23 @@ func (m *HostMutation) OldProvisionSteps(ctx context.Context) (v []string, err e return oldValue.ProvisionSteps, nil } +// AppendProvisionSteps adds s to the "provision_steps" field. +func (m *HostMutation) AppendProvisionSteps(s []string) { + m.appendprovision_steps = append(m.appendprovision_steps, s...) +} + +// AppendedProvisionSteps returns the list of values that were appended to the "provision_steps" field in this mutation. +func (m *HostMutation) AppendedProvisionSteps() ([]string, bool) { + if len(m.appendprovision_steps) == 0 { + return nil, false + } + return m.appendprovision_steps, true +} + // ClearProvisionSteps clears the value of the "provision_steps" field. 
func (m *HostMutation) ClearProvisionSteps() { m.provision_steps = nil + m.appendprovision_steps = nil m.clearedFields[host.FieldProvisionSteps] = struct{}{} } @@ -17477,6 +17893,7 @@ func (m *HostMutation) ProvisionStepsCleared() bool { // ResetProvisionSteps resets all changes to the "provision_steps" field. func (m *HostMutation) ResetProvisionSteps() { m.provision_steps = nil + m.appendprovision_steps = nil delete(m.clearedFields, host.FieldProvisionSteps) } @@ -17815,11 +18232,26 @@ func (m *HostMutation) Where(ps ...predicate.Host) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the HostMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *HostMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Host, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *HostMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *HostMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Host). func (m *HostMutation) Type() string { return m.typ @@ -17831,7 +18263,7 @@ func (m *HostMutation) Type() string { func (m *HostMutation) Fields() []string { fields := make([]string, 0, 14) if m.hcl_id != nil { - fields = append(fields, host.FieldHclID) + fields = append(fields, host.FieldHCLID) } if m.hostname != nil { fields = append(fields, host.FieldHostname) @@ -17880,8 +18312,8 @@ func (m *HostMutation) Fields() []string { // schema. func (m *HostMutation) Field(name string) (ent.Value, bool) { switch name { - case host.FieldHclID: - return m.HclID() + case host.FieldHCLID: + return m.HCLID() case host.FieldHostname: return m.Hostname() case host.FieldDescription: @@ -17917,8 +18349,8 @@ func (m *HostMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *HostMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case host.FieldHclID: - return m.OldHclID(ctx) + case host.FieldHCLID: + return m.OldHCLID(ctx) case host.FieldHostname: return m.OldHostname(ctx) case host.FieldDescription: @@ -17954,12 +18386,12 @@ func (m *HostMutation) OldField(ctx context.Context, name string) (ent.Value, er // type. func (m *HostMutation) SetField(name string, value ent.Value) error { switch name { - case host.FieldHclID: + case host.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case host.FieldHostname: v, ok := value.(string) @@ -18125,8 +18557,8 @@ func (m *HostMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *HostMutation) ResetField(name string) error { switch name { - case host.FieldHclID: - m.ResetHclID() + case host.FieldHCLID: + m.ResetHCLID() return nil case host.FieldHostname: m.ResetHostname() @@ -18728,11 +19160,26 @@ func (m *HostDependencyMutation) Where(ps ...predicate.HostDependency) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the HostDependencyMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. 
+func (m *HostDependencyMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.HostDependency, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *HostDependencyMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *HostDependencyMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (HostDependency). func (m *HostDependencyMutation) Type() string { return m.typ @@ -18907,8 +19354,6 @@ func (m *HostDependencyMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *HostDependencyMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -19113,13 +19558,13 @@ func (m *IdentityMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *IdentityMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *IdentityMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *IdentityMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *IdentityMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -19127,25 +19572,25 @@ func (m *IdentityMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Identity entity. +// OldHCLID returns the old "hcl_id" field's value of the Identity entity. // If the Identity object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *IdentityMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *IdentityMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *IdentityMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *IdentityMutation) ResetHCLID() { m.hcl_id = nil } @@ -19481,11 +19926,26 @@ func (m *IdentityMutation) Where(ps ...predicate.Identity) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the IdentityMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *IdentityMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Identity, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *IdentityMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. 
+func (m *IdentityMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Identity). func (m *IdentityMutation) Type() string { return m.typ @@ -19497,7 +19957,7 @@ func (m *IdentityMutation) Type() string { func (m *IdentityMutation) Fields() []string { fields := make([]string, 0, 9) if m.hcl_id != nil { - fields = append(fields, identity.FieldHclID) + fields = append(fields, identity.FieldHCLID) } if m.first_name != nil { fields = append(fields, identity.FieldFirstName) @@ -19531,8 +19991,8 @@ func (m *IdentityMutation) Fields() []string { // schema. func (m *IdentityMutation) Field(name string) (ent.Value, bool) { switch name { - case identity.FieldHclID: - return m.HclID() + case identity.FieldHCLID: + return m.HCLID() case identity.FieldFirstName: return m.FirstName() case identity.FieldLastName: @@ -19558,8 +20018,8 @@ func (m *IdentityMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *IdentityMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case identity.FieldHclID: - return m.OldHclID(ctx) + case identity.FieldHCLID: + return m.OldHCLID(ctx) case identity.FieldFirstName: return m.OldFirstName(ctx) case identity.FieldLastName: @@ -19585,12 +20045,12 @@ func (m *IdentityMutation) OldField(ctx context.Context, name string) (ent.Value // type. func (m *IdentityMutation) SetField(name string, value ent.Value) error { switch name { - case identity.FieldHclID: + case identity.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case identity.FieldFirstName: v, ok := value.(string) @@ -19697,8 +20157,8 @@ func (m *IdentityMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *IdentityMutation) ResetField(name string) error { switch name { - case identity.FieldHclID: - m.ResetHclID() + case identity.FieldHCLID: + m.ResetHCLID() return nil case identity.FieldFirstName: m.ResetFirstName() @@ -19758,8 +20218,6 @@ func (m *IdentityMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *IdentityMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -19812,6 +20270,7 @@ type IncludedNetworkMutation struct { id *uuid.UUID name *string hosts *[]string + appendhosts []string clearedFields map[string]struct{} _IncludedNetworkToTag map[uuid.UUID]struct{} removed_IncludedNetworkToTag map[uuid.UUID]struct{} @@ -19972,6 +20431,7 @@ func (m *IncludedNetworkMutation) ResetName() { // SetHosts sets the "hosts" field. func (m *IncludedNetworkMutation) SetHosts(s []string) { m.hosts = &s + m.appendhosts = nil } // Hosts returns the value of the "hosts" field in the mutation. @@ -20000,9 +20460,23 @@ func (m *IncludedNetworkMutation) OldHosts(ctx context.Context) (v []string, err return oldValue.Hosts, nil } +// AppendHosts adds s to the "hosts" field. +func (m *IncludedNetworkMutation) AppendHosts(s []string) { + m.appendhosts = append(m.appendhosts, s...) +} + +// AppendedHosts returns the list of values that were appended to the "hosts" field in this mutation. +func (m *IncludedNetworkMutation) AppendedHosts() ([]string, bool) { + if len(m.appendhosts) == 0 { + return nil, false + } + return m.appendhosts, true +} + // ResetHosts resets all changes to the "hosts" field. 
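// A minimal sketch, assuming an *IncludedNetworkMutation named m: the new
// append tracking records values added to the JSON "hosts" list separately
// from a full SetHosts replacement. The host names below are placeholders.
func exampleAppendHosts(m *IncludedNetworkMutation) {
	// Queue two extra entries instead of overwriting the stored list.
	m.AppendHosts([]string{"host3", "host4"})
	if added, ok := m.AppendedHosts(); ok {
		fmt.Println("pending appends:", added)
	}
}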
func (m *IncludedNetworkMutation) ResetHosts() { m.hosts = nil + m.appendhosts = nil } // AddIncludedNetworkToTagIDs adds the "IncludedNetworkToTag" edge to the Tag entity by ids. @@ -20211,11 +20685,26 @@ func (m *IncludedNetworkMutation) Where(ps ...predicate.IncludedNetwork) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the IncludedNetworkMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *IncludedNetworkMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.IncludedNetwork, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *IncludedNetworkMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *IncludedNetworkMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (IncludedNetwork). func (m *IncludedNetworkMutation) Type() string { return m.typ @@ -20623,13 +21112,13 @@ func (m *NetworkMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *NetworkMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *NetworkMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *NetworkMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *NetworkMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -20637,25 +21126,25 @@ func (m *NetworkMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Network entity. +// OldHCLID returns the old "hcl_id" field's value of the Network entity. // If the Network object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *NetworkMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *NetworkMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *NetworkMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *NetworkMutation) ResetHCLID() { m.hcl_id = nil } @@ -20991,11 +21480,26 @@ func (m *NetworkMutation) Where(ps ...predicate.Network) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the NetworkMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *NetworkMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Network, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) 
+} + // Op returns the operation name. func (m *NetworkMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *NetworkMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Network). func (m *NetworkMutation) Type() string { return m.typ @@ -21007,7 +21511,7 @@ func (m *NetworkMutation) Type() string { func (m *NetworkMutation) Fields() []string { fields := make([]string, 0, 6) if m.hcl_id != nil { - fields = append(fields, network.FieldHclID) + fields = append(fields, network.FieldHCLID) } if m.name != nil { fields = append(fields, network.FieldName) @@ -21032,8 +21536,8 @@ func (m *NetworkMutation) Fields() []string { // schema. func (m *NetworkMutation) Field(name string) (ent.Value, bool) { switch name { - case network.FieldHclID: - return m.HclID() + case network.FieldHCLID: + return m.HCLID() case network.FieldName: return m.Name() case network.FieldCidr: @@ -21053,8 +21557,8 @@ func (m *NetworkMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *NetworkMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case network.FieldHclID: - return m.OldHclID(ctx) + case network.FieldHCLID: + return m.OldHCLID(ctx) case network.FieldName: return m.OldName(ctx) case network.FieldCidr: @@ -21074,12 +21578,12 @@ func (m *NetworkMutation) OldField(ctx context.Context, name string) (ent.Value, // type. func (m *NetworkMutation) SetField(name string, value ent.Value) error { switch name { - case network.FieldHclID: + case network.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case network.FieldName: v, ok := value.(string) @@ -21165,8 +21669,8 @@ func (m *NetworkMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *NetworkMutation) ResetField(name string) error { switch name { - case network.FieldHclID: - m.ResetHclID() + case network.FieldHCLID: + m.ResetHCLID() return nil case network.FieldName: m.ResetName() @@ -21985,11 +22489,26 @@ func (m *PlanMutation) Where(ps ...predicate.Plan) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the PlanMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PlanMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Plan, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *PlanMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *PlanMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Plan). func (m *PlanMutation) Type() string { return m.typ @@ -22687,11 +23206,26 @@ func (m *PlanDiffMutation) Where(ps ...predicate.PlanDiff) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the PlanDiffMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PlanDiffMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PlanDiff, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *PlanDiffMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. 
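// A minimal sketch, assuming a *NetworkMutation named m: name-based field
// access now goes through the renamed network.FieldHCLID constant rather than
// network.FieldHclID. The value written below is a placeholder.
func exampleGenericHCLID(m *NetworkMutation) error {
	if err := m.SetField(network.FieldHCLID, "vnet-example"); err != nil {
		return err
	}
	if v, ok := m.Field(network.FieldHCLID); ok {
		fmt.Println("hcl_id is now:", v)
	}
	return nil
}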
+func (m *PlanDiffMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (PlanDiff). func (m *PlanDiffMutation) Type() string { return m.typ @@ -22867,8 +23401,6 @@ func (m *PlanDiffMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *PlanDiffMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -23627,11 +24159,26 @@ func (m *ProvisionedHostMutation) Where(ps ...predicate.ProvisionedHost) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the ProvisionedHostMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ProvisionedHostMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ProvisionedHost, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *ProvisionedHostMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ProvisionedHostMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (ProvisionedHost). func (m *ProvisionedHostMutation) Type() string { return m.typ @@ -24537,11 +25084,26 @@ func (m *ProvisionedNetworkMutation) Where(ps ...predicate.ProvisionedNetwork) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the ProvisionedNetworkMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ProvisionedNetworkMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ProvisionedNetwork, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *ProvisionedNetworkMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ProvisionedNetworkMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (ProvisionedNetwork). func (m *ProvisionedNetworkMutation) Type() string { return m.typ @@ -25580,11 +26142,26 @@ func (m *ProvisioningStepMutation) Where(ps ...predicate.ProvisioningStep) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the ProvisioningStepMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ProvisioningStepMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ProvisioningStep, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *ProvisioningStepMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ProvisioningStepMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (ProvisioningStep). func (m *ProvisioningStepMutation) Type() string { return m.typ @@ -26020,6 +26597,7 @@ type RepoCommitMutation struct { message *string tree_hash *string parent_hashes *[]string + appendparent_hashes []string clearedFields map[string]struct{} _RepoCommitToRepository *uuid.UUID cleared_RepoCommitToRepository bool @@ -26407,6 +26985,7 @@ func (m *RepoCommitMutation) ResetTreeHash() { // SetParentHashes sets the "parent_hashes" field. 
func (m *RepoCommitMutation) SetParentHashes(s []string) { m.parent_hashes = &s + m.appendparent_hashes = nil } // ParentHashes returns the value of the "parent_hashes" field in the mutation. @@ -26435,9 +27014,23 @@ func (m *RepoCommitMutation) OldParentHashes(ctx context.Context) (v []string, e return oldValue.ParentHashes, nil } +// AppendParentHashes adds s to the "parent_hashes" field. +func (m *RepoCommitMutation) AppendParentHashes(s []string) { + m.appendparent_hashes = append(m.appendparent_hashes, s...) +} + +// AppendedParentHashes returns the list of values that were appended to the "parent_hashes" field in this mutation. +func (m *RepoCommitMutation) AppendedParentHashes() ([]string, bool) { + if len(m.appendparent_hashes) == 0 { + return nil, false + } + return m.appendparent_hashes, true +} + // ResetParentHashes resets all changes to the "parent_hashes" field. func (m *RepoCommitMutation) ResetParentHashes() { m.parent_hashes = nil + m.appendparent_hashes = nil } // SetRepoCommitToRepositoryID sets the "RepoCommitToRepository" edge to the Repository entity by id. @@ -26484,11 +27077,26 @@ func (m *RepoCommitMutation) Where(ps ...predicate.RepoCommit) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the RepoCommitMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *RepoCommitMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.RepoCommit, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *RepoCommitMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *RepoCommitMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (RepoCommit). func (m *RepoCommitMutation) Type() string { return m.typ @@ -26759,8 +27367,6 @@ func (m *RepoCommitMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *RepoCommitMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -27188,11 +27794,26 @@ func (m *RepositoryMutation) Where(ps ...predicate.Repository) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the RepositoryMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *RepositoryMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Repository, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *RepositoryMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *RepositoryMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Repository). func (m *RepositoryMutation) Type() string { return m.typ @@ -27478,6 +28099,7 @@ type ScriptMutation struct { addtimeout *int ignore_errors *bool args *[]string + appendargs []string disabled *bool vars *map[string]string abs_path *string @@ -27600,13 +28222,13 @@ func (m *ScriptMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { } } -// SetHclID sets the "hcl_id" field. -func (m *ScriptMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. 
+func (m *ScriptMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *ScriptMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *ScriptMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -27614,25 +28236,25 @@ func (m *ScriptMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the Script entity. +// OldHCLID returns the old "hcl_id" field's value of the Script entity. // If the Script object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ScriptMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *ScriptMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *ScriptMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *ScriptMutation) ResetHCLID() { m.hcl_id = nil } @@ -27967,6 +28589,7 @@ func (m *ScriptMutation) ResetIgnoreErrors() { // SetArgs sets the "args" field. func (m *ScriptMutation) SetArgs(s []string) { m.args = &s + m.appendargs = nil } // Args returns the value of the "args" field in the mutation. @@ -27995,9 +28618,23 @@ func (m *ScriptMutation) OldArgs(ctx context.Context) (v []string, err error) { return oldValue.Args, nil } +// AppendArgs adds s to the "args" field. +func (m *ScriptMutation) AppendArgs(s []string) { + m.appendargs = append(m.appendargs, s...) +} + +// AppendedArgs returns the list of values that were appended to the "args" field in this mutation. +func (m *ScriptMutation) AppendedArgs() ([]string, bool) { + if len(m.appendargs) == 0 { + return nil, false + } + return m.appendargs, true +} + // ResetArgs resets all changes to the "args" field. func (m *ScriptMutation) ResetArgs() { m.args = nil + m.appendargs = nil } // SetDisabled sets the "disabled" field. @@ -28296,11 +28933,26 @@ func (m *ScriptMutation) Where(ps ...predicate.Script) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the ScriptMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ScriptMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Script, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *ScriptMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ScriptMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Script). 
func (m *ScriptMutation) Type() string { return m.typ @@ -28312,7 +28964,7 @@ func (m *ScriptMutation) Type() string { func (m *ScriptMutation) Fields() []string { fields := make([]string, 0, 14) if m.hcl_id != nil { - fields = append(fields, script.FieldHclID) + fields = append(fields, script.FieldHCLID) } if m.name != nil { fields = append(fields, script.FieldName) @@ -28361,8 +29013,8 @@ func (m *ScriptMutation) Fields() []string { // schema. func (m *ScriptMutation) Field(name string) (ent.Value, bool) { switch name { - case script.FieldHclID: - return m.HclID() + case script.FieldHCLID: + return m.HCLID() case script.FieldName: return m.Name() case script.FieldLanguage: @@ -28398,8 +29050,8 @@ func (m *ScriptMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *ScriptMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case script.FieldHclID: - return m.OldHclID(ctx) + case script.FieldHCLID: + return m.OldHCLID(ctx) case script.FieldName: return m.OldName(ctx) case script.FieldLanguage: @@ -28435,12 +29087,12 @@ func (m *ScriptMutation) OldField(ctx context.Context, name string) (ent.Value, // type. func (m *ScriptMutation) SetField(name string, value ent.Value) error { switch name { - case script.FieldHclID: + case script.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil case script.FieldName: v, ok := value.(string) @@ -28609,8 +29261,8 @@ func (m *ScriptMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *ScriptMutation) ResetField(name string) error { switch name { - case script.FieldHclID: - m.ResetHclID() + case script.FieldHCLID: + m.ResetHCLID() return nil case script.FieldName: m.ResetName() @@ -28793,6 +29445,7 @@ type ServerTaskMutation struct { start_time *time.Time end_time *time.Time errors *[]string + appenderrors []string log_file_path *string clearedFields map[string]struct{} _ServerTaskToAuthUser *uuid.UUID @@ -29054,6 +29707,7 @@ func (m *ServerTaskMutation) ResetEndTime() { // SetErrors sets the "errors" field. func (m *ServerTaskMutation) SetErrors(s []string) { m.errors = &s + m.appenderrors = nil } // Errors returns the value of the "errors" field in the mutation. @@ -29082,9 +29736,23 @@ func (m *ServerTaskMutation) OldErrors(ctx context.Context) (v []string, err err return oldValue.Errors, nil } +// AppendErrors adds s to the "errors" field. +func (m *ServerTaskMutation) AppendErrors(s []string) { + m.appenderrors = append(m.appenderrors, s...) +} + +// AppendedErrors returns the list of values that were appended to the "errors" field in this mutation. +func (m *ServerTaskMutation) AppendedErrors() ([]string, bool) { + if len(m.appenderrors) == 0 { + return nil, false + } + return m.appenderrors, true +} + // ClearErrors clears the value of the "errors" field. func (m *ServerTaskMutation) ClearErrors() { m.errors = nil + m.appenderrors = nil m.clearedFields[servertask.FieldErrors] = struct{}{} } @@ -29097,6 +29765,7 @@ func (m *ServerTaskMutation) ErrorsCleared() bool { // ResetErrors resets all changes to the "errors" field. func (m *ServerTaskMutation) ResetErrors() { m.errors = nil + m.appenderrors = nil delete(m.clearedFields, servertask.FieldErrors) } @@ -29403,11 +30072,26 @@ func (m *ServerTaskMutation) Where(ps ...predicate.ServerTask) { m.predicates = append(m.predicates, ps...) 
} +// WhereP appends storage-level predicates to the ServerTaskMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ServerTaskMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ServerTask, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *ServerTaskMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ServerTaskMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (ServerTask). func (m *ServerTaskMutation) Type() string { return m.typ @@ -30530,11 +31214,26 @@ func (m *StatusMutation) Where(ps ...predicate.Status) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the StatusMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *StatusMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Status, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *StatusMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *StatusMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Status). func (m *StatusMutation) Type() string { return m.typ @@ -30843,8 +31542,6 @@ func (m *StatusMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *StatusMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -31198,11 +31895,26 @@ func (m *TagMutation) Where(ps ...predicate.Tag) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the TagMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *TagMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Tag, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *TagMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *TagMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Tag). func (m *TagMutation) Type() string { return m.typ @@ -31787,11 +32499,26 @@ func (m *TeamMutation) Where(ps ...predicate.Team) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the TeamMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *TeamMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Team, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *TeamMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *TeamMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Team). func (m *TeamMutation) Type() string { return m.typ @@ -32325,11 +33052,26 @@ func (m *TokenMutation) Where(ps ...predicate.Token) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the TokenMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *TokenMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Token, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *TokenMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *TokenMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Token). func (m *TokenMutation) Type() string { return m.typ @@ -32498,8 +33240,6 @@ func (m *TokenMutation) RemovedEdges() []string { // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. func (m *TokenMutation) RemovedIDs(name string) []ent.Value { - switch name { - } return nil } @@ -32778,13 +33518,13 @@ func (m *UserMutation) ResetEmail() { m.email = nil } -// SetHclID sets the "hcl_id" field. -func (m *UserMutation) SetHclID(s string) { +// SetHCLID sets the "hcl_id" field. +func (m *UserMutation) SetHCLID(s string) { m.hcl_id = &s } -// HclID returns the value of the "hcl_id" field in the mutation. -func (m *UserMutation) HclID() (r string, exists bool) { +// HCLID returns the value of the "hcl_id" field in the mutation. +func (m *UserMutation) HCLID() (r string, exists bool) { v := m.hcl_id if v == nil { return @@ -32792,25 +33532,25 @@ func (m *UserMutation) HclID() (r string, exists bool) { return *v, true } -// OldHclID returns the old "hcl_id" field's value of the User entity. +// OldHCLID returns the old "hcl_id" field's value of the User entity. // If the User object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *UserMutation) OldHclID(ctx context.Context) (v string, err error) { +func (m *UserMutation) OldHCLID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHclID is only allowed on UpdateOne operations") + return v, errors.New("OldHCLID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHclID requires an ID field in the mutation") + return v, errors.New("OldHCLID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldHclID: %w", err) + return v, fmt.Errorf("querying old value for OldHCLID: %w", err) } - return oldValue.HclID, nil + return oldValue.HCLID, nil } -// ResetHclID resets all changes to the "hcl_id" field. -func (m *UserMutation) ResetHclID() { +// ResetHCLID resets all changes to the "hcl_id" field. +func (m *UserMutation) ResetHCLID() { m.hcl_id = nil } @@ -32927,11 +33667,26 @@ func (m *UserMutation) Where(ps ...predicate.User) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the UserMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.User, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *UserMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *UserMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (User). 
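// A minimal sketch, assuming a *UserMutation named m inside an update hook:
// the renamed OldHCLID accessor reads the pre-update value, and the generated
// code only allows it for UpdateOne operations. ctx is assumed to come from
// the surrounding hook.
func exampleOldHCLID(ctx context.Context, m *UserMutation) {
	if !m.Op().Is(OpUpdateOne) {
		return // old field values are only available on single-row updates
	}
	if old, err := m.OldHCLID(ctx); err == nil {
		fmt.Println("hcl_id before this update:", old)
	}
}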
func (m *UserMutation) Type() string { return m.typ @@ -32952,7 +33707,7 @@ func (m *UserMutation) Fields() []string { fields = append(fields, user.FieldEmail) } if m.hcl_id != nil { - fields = append(fields, user.FieldHclID) + fields = append(fields, user.FieldHCLID) } return fields } @@ -32968,8 +33723,8 @@ func (m *UserMutation) Field(name string) (ent.Value, bool) { return m.UUID() case user.FieldEmail: return m.Email() - case user.FieldHclID: - return m.HclID() + case user.FieldHCLID: + return m.HCLID() } return nil, false } @@ -32985,8 +33740,8 @@ func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldUUID(ctx) case user.FieldEmail: return m.OldEmail(ctx) - case user.FieldHclID: - return m.OldHclID(ctx) + case user.FieldHCLID: + return m.OldHCLID(ctx) } return nil, fmt.Errorf("unknown User field %s", name) } @@ -33017,12 +33772,12 @@ func (m *UserMutation) SetField(name string, value ent.Value) error { } m.SetEmail(v) return nil - case user.FieldHclID: + case user.FieldHCLID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetHclID(v) + m.SetHCLID(v) return nil } return fmt.Errorf("unknown User field %s", name) @@ -33082,8 +33837,8 @@ func (m *UserMutation) ResetField(name string) error { case user.FieldEmail: m.ResetEmail() return nil - case user.FieldHclID: - m.ResetHclID() + case user.FieldHCLID: + m.ResetHCLID() return nil } return fmt.Errorf("unknown User field %s", name) diff --git a/ent/network.go b/ent/network.go index 1aa15888..45ba34b3 100755 --- a/ent/network.go +++ b/ent/network.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/network" @@ -18,8 +19,8 @@ type Network struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Name holds the value of the "name" field. Name string `json:"name,omitempty" hcl:"name,attr"` // Cidr holds the value of the "cidr" field. @@ -34,6 +35,7 @@ type Network struct { // The values are being populated by the NetworkQuery when eager-loading is set. Edges NetworkEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // NetworkToEnvironment holds the value of the NetworkToEnvironment edge. HCLNetworkToEnvironment *Environment `json:"NetworkToEnvironment,omitempty"` @@ -41,8 +43,9 @@ type Network struct { HCLNetworkToHostDependency []*HostDependency `json:"NetworkToHostDependency,omitempty"` // NetworkToIncludedNetwork holds the value of the NetworkToIncludedNetwork edge. HCLNetworkToIncludedNetwork []*IncludedNetwork `json:"NetworkToIncludedNetwork,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_network *uuid.UUID + selectValues sql.SelectValues } // NetworkEdges holds the relations/edges for other nodes in the graph. @@ -56,6 +59,11 @@ type NetworkEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [3]bool + // totalCount holds the count of the edges above. 
+ totalCount [3]map[string]int + + namedNetworkToHostDependency map[string][]*HostDependency + namedNetworkToIncludedNetwork map[string][]*IncludedNetwork } // NetworkToEnvironmentOrErr returns the NetworkToEnvironment value or an error if the edge @@ -63,8 +71,7 @@ type NetworkEdges struct { func (e NetworkEdges) NetworkToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[0] { if e.NetworkToEnvironment == nil { - // The edge NetworkToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.NetworkToEnvironment, nil @@ -91,22 +98,22 @@ func (e NetworkEdges) NetworkToIncludedNetworkOrErr() ([]*IncludedNetwork, error } // scanValues returns the types for scanning values from sql.Rows. -func (*Network) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Network) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case network.FieldVars, network.FieldTags: values[i] = new([]byte) case network.FieldVdiVisible: values[i] = new(sql.NullBool) - case network.FieldHclID, network.FieldName, network.FieldCidr: + case network.FieldHCLID, network.FieldName, network.FieldCidr: values[i] = new(sql.NullString) case network.FieldID: values[i] = new(uuid.UUID) case network.ForeignKeys[0]: // environment_environment_to_network values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Network", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -114,7 +121,7 @@ func (*Network) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Network fields. -func (n *Network) assignValues(columns []string, values []interface{}) error { +func (n *Network) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -126,11 +133,11 @@ func (n *Network) assignValues(columns []string, values []interface{}) error { } else if value != nil { n.ID = *value } - case network.FieldHclID: + case network.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - n.HclID = value.String + n.HCLID = value.String } case network.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -173,41 +180,49 @@ func (n *Network) assignValues(columns []string, values []interface{}) error { n.environment_environment_to_network = new(uuid.UUID) *n.environment_environment_to_network = *value.S.(*uuid.UUID) } + default: + n.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Network. +// This includes values selected through modifiers, order, etc. +func (n *Network) Value(name string) (ent.Value, error) { + return n.selectValues.Get(name) +} + // QueryNetworkToEnvironment queries the "NetworkToEnvironment" edge of the Network entity. func (n *Network) QueryNetworkToEnvironment() *EnvironmentQuery { - return (&NetworkClient{config: n.config}).QueryNetworkToEnvironment(n) + return NewNetworkClient(n.config).QueryNetworkToEnvironment(n) } // QueryNetworkToHostDependency queries the "NetworkToHostDependency" edge of the Network entity. 
func (n *Network) QueryNetworkToHostDependency() *HostDependencyQuery { - return (&NetworkClient{config: n.config}).QueryNetworkToHostDependency(n) + return NewNetworkClient(n.config).QueryNetworkToHostDependency(n) } // QueryNetworkToIncludedNetwork queries the "NetworkToIncludedNetwork" edge of the Network entity. func (n *Network) QueryNetworkToIncludedNetwork() *IncludedNetworkQuery { - return (&NetworkClient{config: n.config}).QueryNetworkToIncludedNetwork(n) + return NewNetworkClient(n.config).QueryNetworkToIncludedNetwork(n) } // Update returns a builder for updating this Network. // Note that you need to call Network.Unwrap() before calling this method if this Network // was returned from a transaction, and the transaction was committed or rolled back. func (n *Network) Update() *NetworkUpdateOne { - return (&NetworkClient{config: n.config}).UpdateOne(n) + return NewNetworkClient(n.config).UpdateOne(n) } // Unwrap unwraps the Network entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (n *Network) Unwrap() *Network { - tx, ok := n.config.driver.(*txDriver) + _tx, ok := n.config.driver.(*txDriver) if !ok { panic("ent: Network is not a transactional entity") } - n.config.driver = tx.drv + n.config.driver = _tx.drv return n } @@ -215,28 +230,75 @@ func (n *Network) Unwrap() *Network { func (n *Network) String() string { var builder strings.Builder builder.WriteString("Network(") - builder.WriteString(fmt.Sprintf("id=%v", n.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(n.HclID) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", n.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(n.HCLID) + builder.WriteString(", ") + builder.WriteString("name=") builder.WriteString(n.Name) - builder.WriteString(", cidr=") + builder.WriteString(", ") + builder.WriteString("cidr=") builder.WriteString(n.Cidr) - builder.WriteString(", vdi_visible=") + builder.WriteString(", ") + builder.WriteString("vdi_visible=") builder.WriteString(fmt.Sprintf("%v", n.VdiVisible)) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", n.Vars)) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", n.Tags)) builder.WriteByte(')') return builder.String() } -// Networks is a parsable slice of Network. -type Networks []*Network +// NamedNetworkToHostDependency returns the NetworkToHostDependency named value or an error if the edge was not +// loaded in eager-loading with this name. +func (n *Network) NamedNetworkToHostDependency(name string) ([]*HostDependency, error) { + if n.Edges.namedNetworkToHostDependency == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := n.Edges.namedNetworkToHostDependency[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (n Networks) config(cfg config) { - for _i := range n { - n[_i].config = cfg +func (n *Network) appendNamedNetworkToHostDependency(name string, edges ...*HostDependency) { + if n.Edges.namedNetworkToHostDependency == nil { + n.Edges.namedNetworkToHostDependency = make(map[string][]*HostDependency) + } + if len(edges) == 0 { + n.Edges.namedNetworkToHostDependency[name] = []*HostDependency{} + } else { + n.Edges.namedNetworkToHostDependency[name] = append(n.Edges.namedNetworkToHostDependency[name], edges...) 
} } + +// NamedNetworkToIncludedNetwork returns the NetworkToIncludedNetwork named value or an error if the edge was not +// loaded in eager-loading with this name. +func (n *Network) NamedNetworkToIncludedNetwork(name string) ([]*IncludedNetwork, error) { + if n.Edges.namedNetworkToIncludedNetwork == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := n.Edges.namedNetworkToIncludedNetwork[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (n *Network) appendNamedNetworkToIncludedNetwork(name string, edges ...*IncludedNetwork) { + if n.Edges.namedNetworkToIncludedNetwork == nil { + n.Edges.namedNetworkToIncludedNetwork = make(map[string][]*IncludedNetwork) + } + if len(edges) == 0 { + n.Edges.namedNetworkToIncludedNetwork[name] = []*IncludedNetwork{} + } else { + n.Edges.namedNetworkToIncludedNetwork[name] = append(n.Edges.namedNetworkToIncludedNetwork[name], edges...) + } +} + +// Networks is a parsable slice of Network. +type Networks []*Network diff --git a/ent/network/network.go b/ent/network/network.go index 8e1d51bc..13ff6b41 100755 --- a/ent/network/network.go +++ b/ent/network/network.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package network import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "network" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldName holds the string denoting the name field in the database. FieldName = "name" // FieldCidr holds the string denoting the cidr field in the database. @@ -57,7 +59,7 @@ const ( // Columns holds all SQL columns for network fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldName, FieldCidr, FieldVdiVisible, @@ -90,3 +92,87 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Network queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCidr orders the results by the cidr field. +func ByCidr(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCidr, opts...).ToFunc() +} + +// ByVdiVisible orders the results by the vdi_visible field. +func ByVdiVisible(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVdiVisible, opts...).ToFunc() +} + +// ByNetworkToEnvironmentField orders the results by NetworkToEnvironment field. +func ByNetworkToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNetworkToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByNetworkToHostDependencyCount orders the results by NetworkToHostDependency count. 
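// A caller-side sketch of the new ordering helpers, assuming an ent *Client
// named client and a context ctx (both placeholders); only ByName and
// ByNetworkToIncludedNetworkCount come from the helpers defined here:
//
//	networks, err := client.Network.Query().
//		Order(network.ByName(), network.ByNetworkToIncludedNetworkCount()).
//		All(ctx)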
+func ByNetworkToHostDependencyCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newNetworkToHostDependencyStep(), opts...) + } +} + +// ByNetworkToHostDependency orders the results by NetworkToHostDependency terms. +func ByNetworkToHostDependency(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNetworkToHostDependencyStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByNetworkToIncludedNetworkCount orders the results by NetworkToIncludedNetwork count. +func ByNetworkToIncludedNetworkCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newNetworkToIncludedNetworkStep(), opts...) + } +} + +// ByNetworkToIncludedNetwork orders the results by NetworkToIncludedNetwork terms. +func ByNetworkToIncludedNetwork(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNetworkToIncludedNetworkStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newNetworkToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(NetworkToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, NetworkToEnvironmentTable, NetworkToEnvironmentColumn), + ) +} +func newNetworkToHostDependencyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(NetworkToHostDependencyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, NetworkToHostDependencyTable, NetworkToHostDependencyColumn), + ) +} +func newNetworkToIncludedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(NetworkToIncludedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, NetworkToIncludedNetworkTable, NetworkToIncludedNetworkColumn), + ) +} diff --git a/ent/network/where.go b/ent/network/where.go index 3bacd591..c4d9a094 100755 --- a/ent/network/where.go +++ b/ent/network/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package network @@ -11,460 +11,272 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Network(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Network(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Network(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Network { + return predicate.Network(sql.FieldEQ(FieldHCLID, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldEQ(FieldName, v)) } // Cidr applies equality check predicate on the "cidr" field. It's identical to CidrEQ. func Cidr(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldEQ(FieldCidr, v)) } // VdiVisible applies equality check predicate on the "vdi_visible" field. It's identical to VdiVisibleEQ. func VdiVisible(v bool) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVdiVisible), v)) - }) + return predicate.Network(sql.FieldEQ(FieldVdiVisible, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Network { + return predicate.Network(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. 
-func HclIDNEQ(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Network { + return predicate.Network(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Network { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.Network { + return predicate.Network(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Network { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Network { + return predicate.Network(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Network { + return predicate.Network(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Network { + return predicate.Network(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Network { + return predicate.Network(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Network { + return predicate.Network(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Network { + return predicate.Network(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. 
-func HclIDHasPrefix(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Network { + return predicate.Network(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Network { + return predicate.Network(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Network { + return predicate.Network(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Network { + return predicate.Network(sql.FieldContainsFold(FieldHCLID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Network { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Network(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Network { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Network(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. 
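// A minimal sketch of the renamed predicate helpers in use: a caller that
// previously filtered with HclIDHasPrefix now calls HCLIDHasPrefix. The
// "vnet-" prefix is a placeholder value.
func exampleHCLIDPrefixFilter() predicate.Network {
	return HCLIDHasPrefix("vnet-")
}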
func NameGTE(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Network(sql.FieldContainsFold(FieldName, v)) } // CidrEQ applies the EQ predicate on the "cidr" field. func CidrEQ(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldEQ(FieldCidr, v)) } // CidrNEQ applies the NEQ predicate on the "cidr" field. func CidrNEQ(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldNEQ(FieldCidr, v)) } // CidrIn applies the In predicate on the "cidr" field. func CidrIn(vs ...string) predicate.Network { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCidr), v...)) - }) + return predicate.Network(sql.FieldIn(FieldCidr, vs...)) } // CidrNotIn applies the NotIn predicate on the "cidr" field. func CidrNotIn(vs ...string) predicate.Network { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Network(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCidr), v...)) - }) + return predicate.Network(sql.FieldNotIn(FieldCidr, vs...)) } // CidrGT applies the GT predicate on the "cidr" field. func CidrGT(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldGT(FieldCidr, v)) } // CidrGTE applies the GTE predicate on the "cidr" field. func CidrGTE(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldGTE(FieldCidr, v)) } // CidrLT applies the LT predicate on the "cidr" field. func CidrLT(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldLT(FieldCidr, v)) } // CidrLTE applies the LTE predicate on the "cidr" field. func CidrLTE(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldLTE(FieldCidr, v)) } // CidrContains applies the Contains predicate on the "cidr" field. func CidrContains(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldContains(FieldCidr, v)) } // CidrHasPrefix applies the HasPrefix predicate on the "cidr" field. func CidrHasPrefix(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldHasPrefix(FieldCidr, v)) } // CidrHasSuffix applies the HasSuffix predicate on the "cidr" field. func CidrHasSuffix(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldHasSuffix(FieldCidr, v)) } // CidrEqualFold applies the EqualFold predicate on the "cidr" field. func CidrEqualFold(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldEqualFold(FieldCidr, v)) } // CidrContainsFold applies the ContainsFold predicate on the "cidr" field. func CidrContainsFold(v string) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldCidr), v)) - }) + return predicate.Network(sql.FieldContainsFold(FieldCidr, v)) } // VdiVisibleEQ applies the EQ predicate on the "vdi_visible" field. func VdiVisibleEQ(v bool) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVdiVisible), v)) - }) + return predicate.Network(sql.FieldEQ(FieldVdiVisible, v)) } // VdiVisibleNEQ applies the NEQ predicate on the "vdi_visible" field. func VdiVisibleNEQ(v bool) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldVdiVisible), v)) - }) + return predicate.Network(sql.FieldNEQ(FieldVdiVisible, v)) } // HasNetworkToEnvironment applies the HasEdge predicate on the "NetworkToEnvironment" edge. 
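Note: the regenerated field predicates above drop the hand-rolled selector closures in favor of the sql.Field* helpers provided by newer ent releases, and the HclID* helpers are renamed to HCLID*. Call sites keep the same shape; a minimal usage sketch follows (assuming an initialized *ent.Client named client and a context.Context named ctx — both names and the prefix value are illustrative, not part of this diff):

	// Query vdi-visible networks whose hcl_id carries an example prefix.
	nets, err := client.Network.Query().
		Where(
			network.HCLIDHasPrefix("corp-"), // was HclIDHasPrefix before the rename
			network.VdiVisibleEQ(true),
		).
		All(ctx)
	if err != nil {
		// handle the query error as the caller sees fit
	}
	_ = nets
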
@@ -472,7 +284,6 @@ func HasNetworkToEnvironment() predicate.Network { return predicate.Network(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(NetworkToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, NetworkToEnvironmentTable, NetworkToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -482,11 +293,7 @@ func HasNetworkToEnvironment() predicate.Network { // HasNetworkToEnvironmentWith applies the HasEdge predicate on the "NetworkToEnvironment" edge with a given conditions (other predicates). func HasNetworkToEnvironmentWith(preds ...predicate.Environment) predicate.Network { return predicate.Network(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(NetworkToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, NetworkToEnvironmentTable, NetworkToEnvironmentColumn), - ) + step := newNetworkToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -500,7 +307,6 @@ func HasNetworkToHostDependency() predicate.Network { return predicate.Network(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(NetworkToHostDependencyTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, NetworkToHostDependencyTable, NetworkToHostDependencyColumn), ) sqlgraph.HasNeighbors(s, step) @@ -510,11 +316,7 @@ func HasNetworkToHostDependency() predicate.Network { // HasNetworkToHostDependencyWith applies the HasEdge predicate on the "NetworkToHostDependency" edge with a given conditions (other predicates). func HasNetworkToHostDependencyWith(preds ...predicate.HostDependency) predicate.Network { return predicate.Network(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(NetworkToHostDependencyInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, NetworkToHostDependencyTable, NetworkToHostDependencyColumn), - ) + step := newNetworkToHostDependencyStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -528,7 +330,6 @@ func HasNetworkToIncludedNetwork() predicate.Network { return predicate.Network(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(NetworkToIncludedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, NetworkToIncludedNetworkTable, NetworkToIncludedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -538,11 +339,7 @@ func HasNetworkToIncludedNetwork() predicate.Network { // HasNetworkToIncludedNetworkWith applies the HasEdge predicate on the "NetworkToIncludedNetwork" edge with a given conditions (other predicates). func HasNetworkToIncludedNetworkWith(preds ...predicate.IncludedNetwork) predicate.Network { return predicate.Network(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(NetworkToIncludedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, NetworkToIncludedNetworkTable, NetworkToIncludedNetworkColumn), - ) + step := newNetworkToIncludedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -553,32 +350,15 @@ func HasNetworkToIncludedNetworkWith(preds ...predicate.IncludedNetwork) predica // And groups predicates with the AND operator between them. 
func And(predicates ...predicate.Network) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Network(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Network) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Network(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Network) predicate.Network { - return predicate.Network(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Network(sql.NotPredicates(p)) } diff --git a/ent/network_create.go b/ent/network_create.go index af73b5d7..028a7aab 100755 --- a/ent/network_create.go +++ b/ent/network_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -23,9 +23,9 @@ type NetworkCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (nc *NetworkCreate) SetHclID(s string) *NetworkCreate { - nc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (nc *NetworkCreate) SetHCLID(s string) *NetworkCreate { + nc.mutation.SetHCLID(s) return nc } @@ -129,44 +129,8 @@ func (nc *NetworkCreate) Mutation() *NetworkMutation { // Save creates the Network in the database. func (nc *NetworkCreate) Save(ctx context.Context) (*Network, error) { - var ( - err error - node *Network - ) nc.defaults() - if len(nc.hooks) == 0 { - if err = nc.check(); err != nil { - return nil, err - } - node, err = nc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*NetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = nc.check(); err != nil { - return nil, err - } - nc.mutation = mutation - if node, err = nc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(nc.hooks) - 1; i >= 0; i-- { - if nc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = nc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, nc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, nc.sqlSave, nc.mutation, nc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -201,7 +165,7 @@ func (nc *NetworkCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (nc *NetworkCreate) check() error { - if _, ok := nc.mutation.HclID(); !ok { + if _, ok := nc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Network.hcl_id"`)} } if _, ok := nc.mutation.Name(); !ok { @@ -223,10 +187,13 @@ func (nc *NetworkCreate) check() error { } func (nc *NetworkCreate) sqlSave(ctx context.Context) (*Network, error) { + if err := nc.check(); err != nil { + return nil, err + } _node, _spec := nc.createSpec() if err := sqlgraph.CreateNode(ctx, nc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -237,70 +204,42 @@ func (nc *NetworkCreate) sqlSave(ctx context.Context) (*Network, error) { return nil, err } } + nc.mutation.id = &_node.ID + nc.mutation.done = true return _node, nil } func (nc *NetworkCreate) createSpec() (*Network, *sqlgraph.CreateSpec) { var ( _node = &Network{config: nc.config} - _spec = &sqlgraph.CreateSpec{ - Table: network.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(network.Table, sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID)) ) if id, ok := nc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := nc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldHclID, - }) - _node.HclID = value + if value, ok := nc.mutation.HCLID(); ok { + _spec.SetField(network.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := nc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldName, - }) + _spec.SetField(network.FieldName, field.TypeString, value) _node.Name = value } if value, ok := nc.mutation.Cidr(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldCidr, - }) + _spec.SetField(network.FieldCidr, field.TypeString, value) _node.Cidr = value } if value, ok := nc.mutation.VdiVisible(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: network.FieldVdiVisible, - }) + _spec.SetField(network.FieldVdiVisible, field.TypeBool, value) _node.VdiVisible = value } if value, ok := nc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: network.FieldVars, - }) + _spec.SetField(network.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := nc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: network.FieldTags, - }) + _spec.SetField(network.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := nc.mutation.NetworkToEnvironmentIDs(); len(nodes) > 0 { @@ -311,10 +250,7 @@ func (nc *NetworkCreate) createSpec() (*Network, *sqlgraph.CreateSpec) { Columns: []string{network.NetworkToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -331,10 +267,7 @@ func (nc *NetworkCreate) createSpec() (*Network, *sqlgraph.CreateSpec) { Columns: 
[]string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -350,10 +283,7 @@ func (nc *NetworkCreate) createSpec() (*Network, *sqlgraph.CreateSpec) { Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -367,11 +297,15 @@ func (nc *NetworkCreate) createSpec() (*Network, *sqlgraph.CreateSpec) { // NetworkCreateBulk is the builder for creating many Network entities in bulk. type NetworkCreateBulk struct { config + err error builders []*NetworkCreate } // Save creates the Network entities in the database. func (ncb *NetworkCreateBulk) Save(ctx context.Context) ([]*Network, error) { + if ncb.err != nil { + return nil, ncb.err + } specs := make([]*sqlgraph.CreateSpec, len(ncb.builders)) nodes := make([]*Network, len(ncb.builders)) mutators := make([]Mutator, len(ncb.builders)) @@ -388,8 +322,8 @@ func (ncb *NetworkCreateBulk) Save(ctx context.Context) ([]*Network, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ncb.builders[i+1].mutation) } else { @@ -397,7 +331,7 @@ func (ncb *NetworkCreateBulk) Save(ctx context.Context) ([]*Network, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, ncb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/network_delete.go b/ent/network_delete.go index 9a988dbb..ce837e1d 100755 --- a/ent/network_delete.go +++ b/ent/network_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (nd *NetworkDelete) Where(ps ...predicate.Network) *NetworkDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (nd *NetworkDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(nd.hooks) == 0 { - affected, err = nd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*NetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - nd.mutation = mutation - affected, err = nd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(nd.hooks) - 1; i >= 0; i-- { - if nd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = nd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, nd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, nd.sqlExec, nd.mutation, nd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
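Note: NetworkCreate.Save and NetworkDelete.Exec now route through the shared withHooks helper instead of inlining the mutator chain, field writes use _spec.SetField, and constraint errors are wrapped with the named msg/wrap fields. The public surface is unchanged; a sketch of a guarded delete (client, ctx, and the name value are illustrative assumptions):

	// Delete every Network whose name matches; affected reports how many rows were removed.
	affected, err := client.Network.Delete().
		Where(network.NameEQ("example-vdi-net")).
		Exec(ctx)
	if err != nil {
		// constraint violations still surface as *ent.ConstraintError
	}
	_ = affected
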
@@ -68,15 +40,7 @@ func (nd *NetworkDelete) ExecX(ctx context.Context) int { } func (nd *NetworkDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: network.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(network.Table, sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID)) if ps := nd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (nd *NetworkDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, nd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, nd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + nd.mutation.done = true + return affected, err } // NetworkDeleteOne is the builder for deleting a single Network entity. @@ -92,6 +61,12 @@ type NetworkDeleteOne struct { nd *NetworkDelete } +// Where appends a list predicates to the NetworkDelete builder. +func (ndo *NetworkDeleteOne) Where(ps ...predicate.Network) *NetworkDeleteOne { + ndo.nd.mutation.Where(ps...) + return ndo +} + // Exec executes the deletion query. func (ndo *NetworkDeleteOne) Exec(ctx context.Context) error { n, err := ndo.nd.Exec(ctx) @@ -107,5 +82,7 @@ func (ndo *NetworkDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ndo *NetworkDeleteOne) ExecX(ctx context.Context) { - ndo.nd.ExecX(ctx) + if err := ndo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/network_query.go b/ent/network_query.go index 536ae70c..86ebbc31 100755 --- a/ent/network_query.go +++ b/ent/network_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -23,17 +22,18 @@ import ( // NetworkQuery is the builder for querying Network entities. type NetworkQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Network - // eager-loading edges. - withNetworkToEnvironment *EnvironmentQuery - withNetworkToHostDependency *HostDependencyQuery - withNetworkToIncludedNetwork *IncludedNetworkQuery - withFKs bool + ctx *QueryContext + order []network.OrderOption + inters []Interceptor + predicates []predicate.Network + withNetworkToEnvironment *EnvironmentQuery + withNetworkToHostDependency *HostDependencyQuery + withNetworkToIncludedNetwork *IncludedNetworkQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Network) error + withNamedNetworkToHostDependency map[string]*HostDependencyQuery + withNamedNetworkToIncludedNetwork map[string]*IncludedNetworkQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -45,34 +45,34 @@ func (nq *NetworkQuery) Where(ps ...predicate.Network) *NetworkQuery { return nq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (nq *NetworkQuery) Limit(limit int) *NetworkQuery { - nq.limit = &limit + nq.ctx.Limit = &limit return nq } -// Offset adds an offset step to the query. +// Offset to start from. 
func (nq *NetworkQuery) Offset(offset int) *NetworkQuery { - nq.offset = &offset + nq.ctx.Offset = &offset return nq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (nq *NetworkQuery) Unique(unique bool) *NetworkQuery { - nq.unique = &unique + nq.ctx.Unique = &unique return nq } -// Order adds an order step to the query. -func (nq *NetworkQuery) Order(o ...OrderFunc) *NetworkQuery { +// Order specifies how the records should be ordered. +func (nq *NetworkQuery) Order(o ...network.OrderOption) *NetworkQuery { nq.order = append(nq.order, o...) return nq } // QueryNetworkToEnvironment chains the current query on the "NetworkToEnvironment" edge. func (nq *NetworkQuery) QueryNetworkToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: nq.config} + query := (&EnvironmentClient{config: nq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := nq.prepareQuery(ctx); err != nil { return nil, err @@ -94,7 +94,7 @@ func (nq *NetworkQuery) QueryNetworkToEnvironment() *EnvironmentQuery { // QueryNetworkToHostDependency chains the current query on the "NetworkToHostDependency" edge. func (nq *NetworkQuery) QueryNetworkToHostDependency() *HostDependencyQuery { - query := &HostDependencyQuery{config: nq.config} + query := (&HostDependencyClient{config: nq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := nq.prepareQuery(ctx); err != nil { return nil, err @@ -116,7 +116,7 @@ func (nq *NetworkQuery) QueryNetworkToHostDependency() *HostDependencyQuery { // QueryNetworkToIncludedNetwork chains the current query on the "NetworkToIncludedNetwork" edge. func (nq *NetworkQuery) QueryNetworkToIncludedNetwork() *IncludedNetworkQuery { - query := &IncludedNetworkQuery{config: nq.config} + query := (&IncludedNetworkClient{config: nq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := nq.prepareQuery(ctx); err != nil { return nil, err @@ -139,7 +139,7 @@ func (nq *NetworkQuery) QueryNetworkToIncludedNetwork() *IncludedNetworkQuery { // First returns the first Network entity from the query. // Returns a *NotFoundError when no Network was found. func (nq *NetworkQuery) First(ctx context.Context) (*Network, error) { - nodes, err := nq.Limit(1).All(ctx) + nodes, err := nq.Limit(1).All(setContextOp(ctx, nq.ctx, "First")) if err != nil { return nil, err } @@ -162,7 +162,7 @@ func (nq *NetworkQuery) FirstX(ctx context.Context) *Network { // Returns a *NotFoundError when no Network ID was found. func (nq *NetworkQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = nq.Limit(1).IDs(ctx); err != nil { + if ids, err = nq.Limit(1).IDs(setContextOp(ctx, nq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -185,7 +185,7 @@ func (nq *NetworkQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Network entity is found. // Returns a *NotFoundError when no Network entities are found. func (nq *NetworkQuery) Only(ctx context.Context) (*Network, error) { - nodes, err := nq.Limit(2).All(ctx) + nodes, err := nq.Limit(2).All(setContextOp(ctx, nq.ctx, "Only")) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (nq *NetworkQuery) OnlyX(ctx context.Context) *Network { // Returns a *NotFoundError when no entities are found. 
func (nq *NetworkQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = nq.Limit(2).IDs(ctx); err != nil { + if ids, err = nq.Limit(2).IDs(setContextOp(ctx, nq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -238,10 +238,12 @@ func (nq *NetworkQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Networks. func (nq *NetworkQuery) All(ctx context.Context) ([]*Network, error) { + ctx = setContextOp(ctx, nq.ctx, "All") if err := nq.prepareQuery(ctx); err != nil { return nil, err } - return nq.sqlAll(ctx) + qr := querierAll[[]*Network, *NetworkQuery]() + return withInterceptors[[]*Network](ctx, nq, qr, nq.inters) } // AllX is like All, but panics if an error occurs. @@ -254,9 +256,12 @@ func (nq *NetworkQuery) AllX(ctx context.Context) []*Network { } // IDs executes the query and returns a list of Network IDs. -func (nq *NetworkQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := nq.Select(network.FieldID).Scan(ctx, &ids); err != nil { +func (nq *NetworkQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if nq.ctx.Unique == nil && nq.path != nil { + nq.Unique(true) + } + ctx = setContextOp(ctx, nq.ctx, "IDs") + if err = nq.Select(network.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -273,10 +278,11 @@ func (nq *NetworkQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (nq *NetworkQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, nq.ctx, "Count") if err := nq.prepareQuery(ctx); err != nil { return 0, err } - return nq.sqlCount(ctx) + return withInterceptors[int](ctx, nq, querierCount[*NetworkQuery](), nq.inters) } // CountX is like Count, but panics if an error occurs. @@ -290,10 +296,15 @@ func (nq *NetworkQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (nq *NetworkQuery) Exist(ctx context.Context) (bool, error) { - if err := nq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, nq.ctx, "Exist") + switch _, err := nq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return nq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -313,24 +324,23 @@ func (nq *NetworkQuery) Clone() *NetworkQuery { } return &NetworkQuery{ config: nq.config, - limit: nq.limit, - offset: nq.offset, - order: append([]OrderFunc{}, nq.order...), + ctx: nq.ctx.Clone(), + order: append([]network.OrderOption{}, nq.order...), + inters: append([]Interceptor{}, nq.inters...), predicates: append([]predicate.Network{}, nq.predicates...), withNetworkToEnvironment: nq.withNetworkToEnvironment.Clone(), withNetworkToHostDependency: nq.withNetworkToHostDependency.Clone(), withNetworkToIncludedNetwork: nq.withNetworkToIncludedNetwork.Clone(), // clone intermediate query. - sql: nq.sql.Clone(), - path: nq.path, - unique: nq.unique, + sql: nq.sql.Clone(), + path: nq.path, } } // WithNetworkToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "NetworkToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. 
func (nq *NetworkQuery) WithNetworkToEnvironment(opts ...func(*EnvironmentQuery)) *NetworkQuery { - query := &EnvironmentQuery{config: nq.config} + query := (&EnvironmentClient{config: nq.config}).Query() for _, opt := range opts { opt(query) } @@ -341,7 +351,7 @@ func (nq *NetworkQuery) WithNetworkToEnvironment(opts ...func(*EnvironmentQuery) // WithNetworkToHostDependency tells the query-builder to eager-load the nodes that are connected to // the "NetworkToHostDependency" edge. The optional arguments are used to configure the query builder of the edge. func (nq *NetworkQuery) WithNetworkToHostDependency(opts ...func(*HostDependencyQuery)) *NetworkQuery { - query := &HostDependencyQuery{config: nq.config} + query := (&HostDependencyClient{config: nq.config}).Query() for _, opt := range opts { opt(query) } @@ -352,7 +362,7 @@ func (nq *NetworkQuery) WithNetworkToHostDependency(opts ...func(*HostDependency // WithNetworkToIncludedNetwork tells the query-builder to eager-load the nodes that are connected to // the "NetworkToIncludedNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (nq *NetworkQuery) WithNetworkToIncludedNetwork(opts ...func(*IncludedNetworkQuery)) *NetworkQuery { - query := &IncludedNetworkQuery{config: nq.config} + query := (&IncludedNetworkClient{config: nq.config}).Query() for _, opt := range opts { opt(query) } @@ -366,25 +376,21 @@ func (nq *NetworkQuery) WithNetworkToIncludedNetwork(opts ...func(*IncludedNetwo // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Network.Query(). -// GroupBy(network.FieldHclID). +// GroupBy(network.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (nq *NetworkQuery) GroupBy(field string, fields ...string) *NetworkGroupBy { - group := &NetworkGroupBy{config: nq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := nq.prepareQuery(ctx); err != nil { - return nil, err - } - return nq.sqlQuery(ctx), nil - } - return group + nq.ctx.Fields = append([]string{field}, fields...) + grbuild := &NetworkGroupBy{build: nq} + grbuild.flds = &nq.ctx.Fields + grbuild.label = network.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -393,20 +399,37 @@ func (nq *NetworkQuery) GroupBy(field string, fields ...string) *NetworkGroupBy // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Network.Query(). -// Select(network.FieldHclID). +// Select(network.FieldHCLID). // Scan(ctx, &v) -// func (nq *NetworkQuery) Select(fields ...string) *NetworkSelect { - nq.fields = append(nq.fields, fields...) - return &NetworkSelect{NetworkQuery: nq} + nq.ctx.Fields = append(nq.ctx.Fields, fields...) + sbuild := &NetworkSelect{NetworkQuery: nq} + sbuild.label = network.Label + sbuild.flds, sbuild.scan = &nq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a NetworkSelect configured with the given aggregations. +func (nq *NetworkQuery) Aggregate(fns ...AggregateFunc) *NetworkSelect { + return nq.Select().Aggregate(fns...) 
} func (nq *NetworkQuery) prepareQuery(ctx context.Context) error { - for _, f := range nq.fields { + for _, inter := range nq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, nq); err != nil { + return err + } + } + } + for _, f := range nq.ctx.Fields { if !network.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -421,7 +444,7 @@ func (nq *NetworkQuery) prepareQuery(ctx context.Context) error { return nil } -func (nq *NetworkQuery) sqlAll(ctx context.Context) ([]*Network, error) { +func (nq *NetworkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Network, error) { var ( nodes = []*Network{} withFKs = nq.withFKs @@ -438,150 +461,189 @@ func (nq *NetworkQuery) sqlAll(ctx context.Context) ([]*Network, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, network.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Network).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Network{config: nq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(nq.modifiers) > 0 { + _spec.Modifiers = nq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, nq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := nq.withNetworkToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Network) - for i := range nodes { - if nodes[i].environment_environment_to_network == nil { - continue - } - fk := *nodes[i].environment_environment_to_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := nq.loadNetworkToEnvironment(ctx, query, nodes, nil, + func(n *Network, e *Environment) { n.Edges.NetworkToEnvironment = e }); err != nil { + return nil, err } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := nq.withNetworkToHostDependency; query != nil { + if err := nq.loadNetworkToHostDependency(ctx, query, nodes, + func(n *Network) { n.Edges.NetworkToHostDependency = []*HostDependency{} }, + func(n *Network, e *HostDependency) { + n.Edges.NetworkToHostDependency = append(n.Edges.NetworkToHostDependency, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.NetworkToEnvironment = n - } + } + if query := nq.withNetworkToIncludedNetwork; query != nil { + if err := nq.loadNetworkToIncludedNetwork(ctx, query, nodes, + func(n *Network) { n.Edges.NetworkToIncludedNetwork = []*IncludedNetwork{} }, + func(n *Network, e *IncludedNetwork) { + n.Edges.NetworkToIncludedNetwork = append(n.Edges.NetworkToIncludedNetwork, e) + }); err != nil { + return nil, err } } - - if query := 
nq.withNetworkToHostDependency; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Network) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.NetworkToHostDependency = []*HostDependency{} + for name, query := range nq.withNamedNetworkToHostDependency { + if err := nq.loadNetworkToHostDependency(ctx, query, nodes, + func(n *Network) { n.appendNamedNetworkToHostDependency(name) }, + func(n *Network, e *HostDependency) { n.appendNamedNetworkToHostDependency(name, e) }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.HostDependency(func(s *sql.Selector) { - s.Where(sql.InValues(network.NetworkToHostDependencyColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range nq.withNamedNetworkToIncludedNetwork { + if err := nq.loadNetworkToIncludedNetwork(ctx, query, nodes, + func(n *Network) { n.appendNamedNetworkToIncludedNetwork(name) }, + func(n *Network, e *IncludedNetwork) { n.appendNamedNetworkToIncludedNetwork(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.host_dependency_host_dependency_to_network - if fk == nil { - return nil, fmt.Errorf(`foreign-key "host_dependency_host_dependency_to_network" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "host_dependency_host_dependency_to_network" returned %v for node %v`, *fk, n.ID) - } - node.Edges.NetworkToHostDependency = append(node.Edges.NetworkToHostDependency, n) + } + for i := range nq.loadTotal { + if err := nq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := nq.withNetworkToIncludedNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Network) +func (nq *NetworkQuery) loadNetworkToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Network, init func(*Network), assign func(*Network, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Network) + for i := range nodes { + if nodes[i].environment_environment_to_network == nil { + continue + } + fk := *nodes[i].environment_environment_to_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_network" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.NetworkToIncludedNetwork = []*IncludedNetwork{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.IncludedNetwork(func(s *sql.Selector) { - s.Where(sql.InValues(network.NetworkToIncludedNetworkColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (nq *NetworkQuery) loadNetworkToHostDependency(ctx context.Context, query *HostDependencyQuery, nodes []*Network, init func(*Network), assign func(*Network, *HostDependency)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Network) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = 
nodes[i] + if init != nil { + init(nodes[i]) } - for _, n := range neighbors { - fk := n.included_network_included_network_to_network - if fk == nil { - return nil, fmt.Errorf(`foreign-key "included_network_included_network_to_network" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "included_network_included_network_to_network" returned %v for node %v`, *fk, n.ID) - } - node.Edges.NetworkToIncludedNetwork = append(node.Edges.NetworkToIncludedNetwork, n) + } + query.withFKs = true + query.Where(predicate.HostDependency(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(network.NetworkToHostDependencyColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.host_dependency_host_dependency_to_network + if fk == nil { + return fmt.Errorf(`foreign-key "host_dependency_host_dependency_to_network" is nil for node %v`, n.ID) } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "host_dependency_host_dependency_to_network" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - - return nodes, nil + return nil +} +func (nq *NetworkQuery) loadNetworkToIncludedNetwork(ctx context.Context, query *IncludedNetworkQuery, nodes []*Network, init func(*Network), assign func(*Network, *IncludedNetwork)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Network) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.IncludedNetwork(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(network.NetworkToIncludedNetworkColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.included_network_included_network_to_network + if fk == nil { + return fmt.Errorf(`foreign-key "included_network_included_network_to_network" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "included_network_included_network_to_network" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } func (nq *NetworkQuery) sqlCount(ctx context.Context) (int, error) { _spec := nq.querySpec() - _spec.Node.Columns = nq.fields - if len(nq.fields) > 0 { - _spec.Unique = nq.unique != nil && *nq.unique + if len(nq.modifiers) > 0 { + _spec.Modifiers = nq.modifiers } - return sqlgraph.CountNodes(ctx, nq.driver, _spec) -} - -func (nq *NetworkQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := nq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = nq.ctx.Fields + if len(nq.ctx.Fields) > 0 { + _spec.Unique = nq.ctx.Unique != nil && *nq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, nq.driver, _spec) } func (nq *NetworkQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: network.Table, - Columns: network.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, - }, - From: nq.sql, - Unique: true, - } - if unique := nq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(network.Table, network.Columns, sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID)) + _spec.From = nq.sql + if unique := nq.ctx.Unique; unique != nil { _spec.Unique = *unique + } 
else if nq.path != nil { + _spec.Unique = true } - if fields := nq.fields; len(fields) > 0 { + if fields := nq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, network.FieldID) for i := range fields { @@ -597,10 +659,10 @@ func (nq *NetworkQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := nq.limit; limit != nil { + if limit := nq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := nq.offset; offset != nil { + if offset := nq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := nq.order; len(ps) > 0 { @@ -616,7 +678,7 @@ func (nq *NetworkQuery) querySpec() *sqlgraph.QuerySpec { func (nq *NetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(nq.driver.Dialect()) t1 := builder.Table(network.Table) - columns := nq.fields + columns := nq.ctx.Fields if len(columns) == 0 { columns = network.Columns } @@ -625,7 +687,7 @@ func (nq *NetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = nq.sql selector.Select(selector.Columns(columns...)...) } - if nq.unique != nil && *nq.unique { + if nq.ctx.Unique != nil && *nq.ctx.Unique { selector.Distinct() } for _, p := range nq.predicates { @@ -634,498 +696,128 @@ func (nq *NetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range nq.order { p(selector) } - if offset := nq.offset; offset != nil { + if offset := nq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := nq.limit; limit != nil { + if limit := nq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// NetworkGroupBy is the group-by builder for Network entities. -type NetworkGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (ngb *NetworkGroupBy) Aggregate(fns ...AggregateFunc) *NetworkGroupBy { - ngb.fns = append(ngb.fns, fns...) - return ngb -} - -// Scan applies the group-by query and scans the result into the given value. -func (ngb *NetworkGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := ngb.path(ctx) - if err != nil { - return err - } - ngb.sql = query - return ngb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ngb *NetworkGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := ngb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (ngb *NetworkGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(ngb.fields) > 1 { - return nil, errors.New("ent: NetworkGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := ngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ngb *NetworkGroupBy) StringsX(ctx context.Context) []string { - v, err := ngb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (ngb *NetworkGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ngb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ngb *NetworkGroupBy) StringX(ctx context.Context) string { - v, err := ngb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (ngb *NetworkGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(ngb.fields) > 1 { - return nil, errors.New("ent: NetworkGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := ngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ngb *NetworkGroupBy) IntsX(ctx context.Context) []int { - v, err := ngb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ngb *NetworkGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ngb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ngb *NetworkGroupBy) IntX(ctx context.Context) int { - v, err := ngb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (ngb *NetworkGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(ngb.fields) > 1 { - return nil, errors.New("ent: NetworkGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := ngb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedNetworkToHostDependency tells the query-builder to eager-load the nodes that are connected to the "NetworkToHostDependency" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (nq *NetworkQuery) WithNamedNetworkToHostDependency(name string, opts ...func(*HostDependencyQuery)) *NetworkQuery { + query := (&HostDependencyClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ngb *NetworkGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := ngb.Float64s(ctx) - if err != nil { - panic(err) + if nq.withNamedNetworkToHostDependency == nil { + nq.withNamedNetworkToHostDependency = make(map[string]*HostDependencyQuery) } - return v + nq.withNamedNetworkToHostDependency[name] = query + return nq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (ngb *NetworkGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ngb.Float64s(ctx); err != nil { - return +// WithNamedNetworkToIncludedNetwork tells the query-builder to eager-load the nodes that are connected to the "NetworkToIncludedNetwork" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (nq *NetworkQuery) WithNamedNetworkToIncludedNetwork(name string, opts ...func(*IncludedNetworkQuery)) *NetworkQuery { + query := (&IncludedNetworkClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkGroupBy.Float64s returned %d results when one was expected", len(v)) + if nq.withNamedNetworkToIncludedNetwork == nil { + nq.withNamedNetworkToIncludedNetwork = make(map[string]*IncludedNetworkQuery) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ngb *NetworkGroupBy) Float64X(ctx context.Context) float64 { - v, err := ngb.Float64(ctx) - if err != nil { - panic(err) - } - return v + nq.withNamedNetworkToIncludedNetwork[name] = query + return nq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (ngb *NetworkGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(ngb.fields) > 1 { - return nil, errors.New("ent: NetworkGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := ngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// NetworkGroupBy is the group-by builder for Network entities. +type NetworkGroupBy struct { + selector + build *NetworkQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (ngb *NetworkGroupBy) BoolsX(ctx context.Context) []bool { - v, err := ngb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (ngb *NetworkGroupBy) Aggregate(fns ...AggregateFunc) *NetworkGroupBy { + ngb.fns = append(ngb.fns, fns...) + return ngb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ngb *NetworkGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ngb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (ngb *NetworkGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ngb.build.ctx, "GroupBy") + if err := ngb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*NetworkQuery, *NetworkGroupBy](ctx, ngb.build, ngb, ngb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. 
-func (ngb *NetworkGroupBy) BoolX(ctx context.Context) bool { - v, err := ngb.Bool(ctx) - if err != nil { - panic(err) +func (ngb *NetworkGroupBy) sqlScan(ctx context.Context, root *NetworkQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ngb.fns)) + for _, fn := range ngb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (ngb *NetworkGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range ngb.fields { - if !network.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ngb.flds)+len(ngb.fns)) + for _, f := range *ngb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := ngb.sqlQuery() + selector.GroupBy(selector.Columns(*ngb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ngb.driver.Query(ctx, query, args, rows); err != nil { + if err := ngb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (ngb *NetworkGroupBy) sqlQuery() *sql.Selector { - selector := ngb.sql.Select() - aggregation := make([]string, 0, len(ngb.fns)) - for _, fn := range ngb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(ngb.fields)+len(ngb.fns)) - for _, f := range ngb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(ngb.fields...)...) -} - // NetworkSelect is the builder for selecting fields of Network entities. type NetworkSelect struct { *NetworkQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ns *NetworkSelect) Aggregate(fns ...AggregateFunc) *NetworkSelect { + ns.fns = append(ns.fns, fns...) + return ns } // Scan applies the selector query and scans the result into the given value. -func (ns *NetworkSelect) Scan(ctx context.Context, v interface{}) error { +func (ns *NetworkSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ns.ctx, "Select") if err := ns.prepareQuery(ctx); err != nil { return err } - ns.sql = ns.NetworkQuery.sqlQuery(ctx) - return ns.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ns *NetworkSelect) ScanX(ctx context.Context, v interface{}) { - if err := ns.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) Strings(ctx context.Context) ([]string, error) { - if len(ns.fields) > 1 { - return nil, errors.New("ent: NetworkSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (ns *NetworkSelect) StringsX(ctx context.Context) []string { - v, err := ns.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ns.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ns *NetworkSelect) StringX(ctx context.Context) string { - v, err := ns.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) Ints(ctx context.Context) ([]int, error) { - if len(ns.fields) > 1 { - return nil, errors.New("ent: NetworkSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ns *NetworkSelect) IntsX(ctx context.Context) []int { - v, err := ns.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ns.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkSelect.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*NetworkQuery, *NetworkSelect](ctx, ns.NetworkQuery, ns, ns.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (ns *NetworkSelect) IntX(ctx context.Context) int { - v, err := ns.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ns.fields) > 1 { - return nil, errors.New("ent: NetworkSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ns *NetworkSelect) Float64sX(ctx context.Context) []float64 { - v, err := ns.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ns.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ns *NetworkSelect) Float64X(ctx context.Context) float64 { - v, err := ns.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. 
-func (ns *NetworkSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ns.fields) > 1 { - return nil, errors.New("ent: NetworkSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ns *NetworkSelect) BoolsX(ctx context.Context) []bool { - v, err := ns.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ns *NetworkSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ns.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{network.Label} - default: - err = fmt.Errorf("ent: NetworkSelect.Bools returned %d results when one was expected", len(v)) +func (ns *NetworkSelect) sqlScan(ctx context.Context, root *NetworkQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ns.fns)) + for _, fn := range ns.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ns *NetworkSelect) BoolX(ctx context.Context) bool { - v, err := ns.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ns.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ns *NetworkSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ns.sql.Query() + query, args := selector.Query() if err := ns.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/network_update.go b/ent/network_update.go index c8d28abe..3cbd1f1e 100755 --- a/ent/network_update.go +++ b/ent/network_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -31,9 +31,17 @@ func (nu *NetworkUpdate) Where(ps ...predicate.Network) *NetworkUpdate { return nu } -// SetHclID sets the "hcl_id" field. -func (nu *NetworkUpdate) SetHclID(s string) *NetworkUpdate { - nu.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (nu *NetworkUpdate) SetHCLID(s string) *NetworkUpdate { + nu.mutation.SetHCLID(s) + return nu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (nu *NetworkUpdate) SetNillableHCLID(s *string) *NetworkUpdate { + if s != nil { + nu.SetHCLID(*s) + } return nu } @@ -43,18 +51,42 @@ func (nu *NetworkUpdate) SetName(s string) *NetworkUpdate { return nu } +// SetNillableName sets the "name" field if the given value is not nil. +func (nu *NetworkUpdate) SetNillableName(s *string) *NetworkUpdate { + if s != nil { + nu.SetName(*s) + } + return nu +} + // SetCidr sets the "cidr" field. func (nu *NetworkUpdate) SetCidr(s string) *NetworkUpdate { nu.mutation.SetCidr(s) return nu } +// SetNillableCidr sets the "cidr" field if the given value is not nil. +func (nu *NetworkUpdate) SetNillableCidr(s *string) *NetworkUpdate { + if s != nil { + nu.SetCidr(*s) + } + return nu +} + // SetVdiVisible sets the "vdi_visible" field. func (nu *NetworkUpdate) SetVdiVisible(b bool) *NetworkUpdate { nu.mutation.SetVdiVisible(b) return nu } +// SetNillableVdiVisible sets the "vdi_visible" field if the given value is not nil. 
+func (nu *NetworkUpdate) SetNillableVdiVisible(b *bool) *NetworkUpdate { + if b != nil { + nu.SetVdiVisible(*b) + } + return nu +} + // SetVars sets the "vars" field. func (nu *NetworkUpdate) SetVars(m map[string]string) *NetworkUpdate { nu.mutation.SetVars(m) @@ -171,34 +203,7 @@ func (nu *NetworkUpdate) RemoveNetworkToIncludedNetwork(i ...*IncludedNetwork) * // Save executes the query and returns the number of nodes affected by the update operation. func (nu *NetworkUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(nu.hooks) == 0 { - affected, err = nu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*NetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - nu.mutation = mutation - affected, err = nu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(nu.hooks) - 1; i >= 0; i-- { - if nu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = nu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, nu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, nu.sqlSave, nu.mutation, nu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -224,16 +229,7 @@ func (nu *NetworkUpdate) ExecX(ctx context.Context) { } func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: network.Table, - Columns: network.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(network.Table, network.Columns, sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID)) if ps := nu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -241,47 +237,23 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := nu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldHclID, - }) + if value, ok := nu.mutation.HCLID(); ok { + _spec.SetField(network.FieldHCLID, field.TypeString, value) } if value, ok := nu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldName, - }) + _spec.SetField(network.FieldName, field.TypeString, value) } if value, ok := nu.mutation.Cidr(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldCidr, - }) + _spec.SetField(network.FieldCidr, field.TypeString, value) } if value, ok := nu.mutation.VdiVisible(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: network.FieldVdiVisible, - }) + _spec.SetField(network.FieldVdiVisible, field.TypeBool, value) } if value, ok := nu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: network.FieldVars, - }) + _spec.SetField(network.FieldVars, field.TypeJSON, value) } if value, ok := nu.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: network.FieldTags, - }) + 
_spec.SetField(network.FieldTags, field.TypeJSON, value) } if nu.mutation.NetworkToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -291,10 +263,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -307,10 +276,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -326,10 +292,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -342,10 +305,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -361,10 +321,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -380,10 +337,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -396,10 +350,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -415,10 +366,7 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -430,10 +378,11 @@ func (nu *NetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{network.Label} } else if 
sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + nu.mutation.done = true return n, nil } @@ -445,9 +394,17 @@ type NetworkUpdateOne struct { mutation *NetworkMutation } -// SetHclID sets the "hcl_id" field. -func (nuo *NetworkUpdateOne) SetHclID(s string) *NetworkUpdateOne { - nuo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (nuo *NetworkUpdateOne) SetHCLID(s string) *NetworkUpdateOne { + nuo.mutation.SetHCLID(s) + return nuo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (nuo *NetworkUpdateOne) SetNillableHCLID(s *string) *NetworkUpdateOne { + if s != nil { + nuo.SetHCLID(*s) + } return nuo } @@ -457,18 +414,42 @@ func (nuo *NetworkUpdateOne) SetName(s string) *NetworkUpdateOne { return nuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (nuo *NetworkUpdateOne) SetNillableName(s *string) *NetworkUpdateOne { + if s != nil { + nuo.SetName(*s) + } + return nuo +} + // SetCidr sets the "cidr" field. func (nuo *NetworkUpdateOne) SetCidr(s string) *NetworkUpdateOne { nuo.mutation.SetCidr(s) return nuo } +// SetNillableCidr sets the "cidr" field if the given value is not nil. +func (nuo *NetworkUpdateOne) SetNillableCidr(s *string) *NetworkUpdateOne { + if s != nil { + nuo.SetCidr(*s) + } + return nuo +} + // SetVdiVisible sets the "vdi_visible" field. func (nuo *NetworkUpdateOne) SetVdiVisible(b bool) *NetworkUpdateOne { nuo.mutation.SetVdiVisible(b) return nuo } +// SetNillableVdiVisible sets the "vdi_visible" field if the given value is not nil. +func (nuo *NetworkUpdateOne) SetNillableVdiVisible(b *bool) *NetworkUpdateOne { + if b != nil { + nuo.SetVdiVisible(*b) + } + return nuo +} + // SetVars sets the "vars" field. func (nuo *NetworkUpdateOne) SetVars(m map[string]string) *NetworkUpdateOne { nuo.mutation.SetVars(m) @@ -583,6 +564,12 @@ func (nuo *NetworkUpdateOne) RemoveNetworkToIncludedNetwork(i ...*IncludedNetwor return nuo.RemoveNetworkToIncludedNetworkIDs(ids...) } +// Where appends a list predicates to the NetworkUpdate builder. +func (nuo *NetworkUpdateOne) Where(ps ...predicate.Network) *NetworkUpdateOne { + nuo.mutation.Where(ps...) + return nuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (nuo *NetworkUpdateOne) Select(field string, fields ...string) *NetworkUpdateOne { @@ -592,34 +579,7 @@ func (nuo *NetworkUpdateOne) Select(field string, fields ...string) *NetworkUpda // Save executes the query and returns the updated Network entity. 
func (nuo *NetworkUpdateOne) Save(ctx context.Context) (*Network, error) { - var ( - err error - node *Network - ) - if len(nuo.hooks) == 0 { - node, err = nuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*NetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - nuo.mutation = mutation - node, err = nuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(nuo.hooks) - 1; i >= 0; i-- { - if nuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = nuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, nuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, nuo.sqlSave, nuo.mutation, nuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -645,16 +605,7 @@ func (nuo *NetworkUpdateOne) ExecX(ctx context.Context) { } func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: network.Table, - Columns: network.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(network.Table, network.Columns, sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID)) id, ok := nuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Network.id" for update`)} @@ -679,47 +630,23 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e } } } - if value, ok := nuo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldHclID, - }) + if value, ok := nuo.mutation.HCLID(); ok { + _spec.SetField(network.FieldHCLID, field.TypeString, value) } if value, ok := nuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldName, - }) + _spec.SetField(network.FieldName, field.TypeString, value) } if value, ok := nuo.mutation.Cidr(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: network.FieldCidr, - }) + _spec.SetField(network.FieldCidr, field.TypeString, value) } if value, ok := nuo.mutation.VdiVisible(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: network.FieldVdiVisible, - }) + _spec.SetField(network.FieldVdiVisible, field.TypeBool, value) } if value, ok := nuo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: network.FieldVars, - }) + _spec.SetField(network.FieldVars, field.TypeJSON, value) } if value, ok := nuo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: network.FieldTags, - }) + _spec.SetField(network.FieldTags, field.TypeJSON, value) } if nuo.mutation.NetworkToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -729,10 +656,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + 
IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -745,10 +669,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -764,10 +685,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -780,10 +698,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -799,10 +714,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToHostDependencyColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: hostdependency.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(hostdependency.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -818,10 +730,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -834,10 +743,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -853,10 +759,7 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e Columns: []string{network.NetworkToIncludedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: includednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(includednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -871,9 +774,10 @@ func (nuo *NetworkUpdateOne) sqlSave(ctx context.Context) (_node *Network, err e if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{network.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + nuo.mutation.done = true return _node, nil } diff --git a/ent/plan.go b/ent/plan.go index 456d265a..a1917602 100755 --- a/ent/plan.go +++ b/ent/plan.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. 
+// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/build" "github.com/gen0cide/laforge/ent/plan" @@ -32,6 +33,7 @@ type Plan struct { // The values are being populated by the PlanQuery when eager-loading is set. Edges PlanEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // PrevPlan holds the value of the PrevPlan edge. HCLPrevPlan []*Plan `json:"PrevPlan,omitempty"` @@ -51,8 +53,9 @@ type Plan struct { HCLPlanToStatus *Status `json:"PlanToStatus,omitempty"` // PlanToPlanDiffs holds the value of the PlanToPlanDiffs edge. HCLPlanToPlanDiffs []*PlanDiff `json:"PlanToPlanDiffs,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ plan_plan_to_build *uuid.UUID + selectValues sql.SelectValues } // PlanEdges holds the relations/edges for other nodes in the graph. @@ -78,6 +81,12 @@ type PlanEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [9]bool + // totalCount holds the count of the edges above. + totalCount [9]map[string]int + + namedPrevPlan map[string][]*Plan + namedNextPlan map[string][]*Plan + namedPlanToPlanDiffs map[string][]*PlanDiff } // PrevPlanOrErr returns the PrevPlan value or an error if the edge @@ -103,8 +112,7 @@ func (e PlanEdges) NextPlanOrErr() ([]*Plan, error) { func (e PlanEdges) PlanToBuildOrErr() (*Build, error) { if e.loadedTypes[2] { if e.PlanToBuild == nil { - // The edge PlanToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.PlanToBuild, nil @@ -117,8 +125,7 @@ func (e PlanEdges) PlanToBuildOrErr() (*Build, error) { func (e PlanEdges) PlanToTeamOrErr() (*Team, error) { if e.loadedTypes[3] { if e.PlanToTeam == nil { - // The edge PlanToTeam was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: team.Label} } return e.PlanToTeam, nil @@ -131,8 +138,7 @@ func (e PlanEdges) PlanToTeamOrErr() (*Team, error) { func (e PlanEdges) PlanToProvisionedNetworkOrErr() (*ProvisionedNetwork, error) { if e.loadedTypes[4] { if e.PlanToProvisionedNetwork == nil { - // The edge PlanToProvisionedNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionednetwork.Label} } return e.PlanToProvisionedNetwork, nil @@ -145,8 +151,7 @@ func (e PlanEdges) PlanToProvisionedNetworkOrErr() (*ProvisionedNetwork, error) func (e PlanEdges) PlanToProvisionedHostOrErr() (*ProvisionedHost, error) { if e.loadedTypes[5] { if e.PlanToProvisionedHost == nil { - // The edge PlanToProvisionedHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionedhost.Label} } return e.PlanToProvisionedHost, nil @@ -159,8 +164,7 @@ func (e PlanEdges) PlanToProvisionedHostOrErr() (*ProvisionedHost, error) { func (e PlanEdges) PlanToProvisioningStepOrErr() (*ProvisioningStep, error) { if e.loadedTypes[6] { if e.PlanToProvisioningStep == nil { - // The edge PlanToProvisioningStep was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: provisioningstep.Label} } return e.PlanToProvisioningStep, nil @@ -173,8 +177,7 @@ func (e PlanEdges) PlanToProvisioningStepOrErr() (*ProvisioningStep, error) { func (e PlanEdges) PlanToStatusOrErr() (*Status, error) { if e.loadedTypes[7] { if e.PlanToStatus == nil { - // The edge PlanToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.PlanToStatus, nil @@ -192,8 +195,8 @@ func (e PlanEdges) PlanToPlanDiffsOrErr() ([]*PlanDiff, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Plan) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Plan) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case plan.FieldStepNumber: @@ -205,7 +208,7 @@ func (*Plan) scanValues(columns []string) ([]interface{}, error) { case plan.ForeignKeys[0]: // plan_plan_to_build values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Plan", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -213,7 +216,7 @@ func (*Plan) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Plan fields. -func (pl *Plan) assignValues(columns []string, values []interface{}) error { +func (pl *Plan) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -250,71 +253,79 @@ func (pl *Plan) assignValues(columns []string, values []interface{}) error { pl.plan_plan_to_build = new(uuid.UUID) *pl.plan_plan_to_build = *value.S.(*uuid.UUID) } + default: + pl.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Plan. +// This includes values selected through modifiers, order, etc. +func (pl *Plan) Value(name string) (ent.Value, error) { + return pl.selectValues.Get(name) +} + // QueryPrevPlan queries the "PrevPlan" edge of the Plan entity. func (pl *Plan) QueryPrevPlan() *PlanQuery { - return (&PlanClient{config: pl.config}).QueryPrevPlan(pl) + return NewPlanClient(pl.config).QueryPrevPlan(pl) } // QueryNextPlan queries the "NextPlan" edge of the Plan entity. func (pl *Plan) QueryNextPlan() *PlanQuery { - return (&PlanClient{config: pl.config}).QueryNextPlan(pl) + return NewPlanClient(pl.config).QueryNextPlan(pl) } // QueryPlanToBuild queries the "PlanToBuild" edge of the Plan entity. func (pl *Plan) QueryPlanToBuild() *BuildQuery { - return (&PlanClient{config: pl.config}).QueryPlanToBuild(pl) + return NewPlanClient(pl.config).QueryPlanToBuild(pl) } // QueryPlanToTeam queries the "PlanToTeam" edge of the Plan entity. func (pl *Plan) QueryPlanToTeam() *TeamQuery { - return (&PlanClient{config: pl.config}).QueryPlanToTeam(pl) + return NewPlanClient(pl.config).QueryPlanToTeam(pl) } // QueryPlanToProvisionedNetwork queries the "PlanToProvisionedNetwork" edge of the Plan entity. 
func (pl *Plan) QueryPlanToProvisionedNetwork() *ProvisionedNetworkQuery { - return (&PlanClient{config: pl.config}).QueryPlanToProvisionedNetwork(pl) + return NewPlanClient(pl.config).QueryPlanToProvisionedNetwork(pl) } // QueryPlanToProvisionedHost queries the "PlanToProvisionedHost" edge of the Plan entity. func (pl *Plan) QueryPlanToProvisionedHost() *ProvisionedHostQuery { - return (&PlanClient{config: pl.config}).QueryPlanToProvisionedHost(pl) + return NewPlanClient(pl.config).QueryPlanToProvisionedHost(pl) } // QueryPlanToProvisioningStep queries the "PlanToProvisioningStep" edge of the Plan entity. func (pl *Plan) QueryPlanToProvisioningStep() *ProvisioningStepQuery { - return (&PlanClient{config: pl.config}).QueryPlanToProvisioningStep(pl) + return NewPlanClient(pl.config).QueryPlanToProvisioningStep(pl) } // QueryPlanToStatus queries the "PlanToStatus" edge of the Plan entity. func (pl *Plan) QueryPlanToStatus() *StatusQuery { - return (&PlanClient{config: pl.config}).QueryPlanToStatus(pl) + return NewPlanClient(pl.config).QueryPlanToStatus(pl) } // QueryPlanToPlanDiffs queries the "PlanToPlanDiffs" edge of the Plan entity. func (pl *Plan) QueryPlanToPlanDiffs() *PlanDiffQuery { - return (&PlanClient{config: pl.config}).QueryPlanToPlanDiffs(pl) + return NewPlanClient(pl.config).QueryPlanToPlanDiffs(pl) } // Update returns a builder for updating this Plan. // Note that you need to call Plan.Unwrap() before calling this method if this Plan // was returned from a transaction, and the transaction was committed or rolled back. func (pl *Plan) Update() *PlanUpdateOne { - return (&PlanClient{config: pl.config}).UpdateOne(pl) + return NewPlanClient(pl.config).UpdateOne(pl) } // Unwrap unwraps the Plan entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (pl *Plan) Unwrap() *Plan { - tx, ok := pl.config.driver.(*txDriver) + _tx, ok := pl.config.driver.(*txDriver) if !ok { panic("ent: Plan is not a transactional entity") } - pl.config.driver = tx.drv + pl.config.driver = _tx.drv return pl } @@ -322,22 +333,90 @@ func (pl *Plan) Unwrap() *Plan { func (pl *Plan) String() string { var builder strings.Builder builder.WriteString("Plan(") - builder.WriteString(fmt.Sprintf("id=%v", pl.ID)) - builder.WriteString(", step_number=") + builder.WriteString(fmt.Sprintf("id=%v, ", pl.ID)) + builder.WriteString("step_number=") builder.WriteString(fmt.Sprintf("%v", pl.StepNumber)) - builder.WriteString(", type=") + builder.WriteString(", ") + builder.WriteString("type=") builder.WriteString(fmt.Sprintf("%v", pl.Type)) - builder.WriteString(", build_id=") + builder.WriteString(", ") + builder.WriteString("build_id=") builder.WriteString(pl.BuildID) builder.WriteByte(')') return builder.String() } -// Plans is a parsable slice of Plan. -type Plans []*Plan +// NamedPrevPlan returns the PrevPlan named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (pl *Plan) NamedPrevPlan(name string) ([]*Plan, error) { + if pl.Edges.namedPrevPlan == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := pl.Edges.namedPrevPlan[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (pl *Plan) appendNamedPrevPlan(name string, edges ...*Plan) { + if pl.Edges.namedPrevPlan == nil { + pl.Edges.namedPrevPlan = make(map[string][]*Plan) + } + if len(edges) == 0 { + pl.Edges.namedPrevPlan[name] = []*Plan{} + } else { + pl.Edges.namedPrevPlan[name] = append(pl.Edges.namedPrevPlan[name], edges...) + } +} + +// NamedNextPlan returns the NextPlan named value or an error if the edge was not +// loaded in eager-loading with this name. +func (pl *Plan) NamedNextPlan(name string) ([]*Plan, error) { + if pl.Edges.namedNextPlan == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := pl.Edges.namedNextPlan[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (pl *Plan) appendNamedNextPlan(name string, edges ...*Plan) { + if pl.Edges.namedNextPlan == nil { + pl.Edges.namedNextPlan = make(map[string][]*Plan) + } + if len(edges) == 0 { + pl.Edges.namedNextPlan[name] = []*Plan{} + } else { + pl.Edges.namedNextPlan[name] = append(pl.Edges.namedNextPlan[name], edges...) + } +} + +// NamedPlanToPlanDiffs returns the PlanToPlanDiffs named value or an error if the edge was not +// loaded in eager-loading with this name. +func (pl *Plan) NamedPlanToPlanDiffs(name string) ([]*PlanDiff, error) { + if pl.Edges.namedPlanToPlanDiffs == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := pl.Edges.namedPlanToPlanDiffs[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (pl Plans) config(cfg config) { - for _i := range pl { - pl[_i].config = cfg +func (pl *Plan) appendNamedPlanToPlanDiffs(name string, edges ...*PlanDiff) { + if pl.Edges.namedPlanToPlanDiffs == nil { + pl.Edges.namedPlanToPlanDiffs = make(map[string][]*PlanDiff) + } + if len(edges) == 0 { + pl.Edges.namedPlanToPlanDiffs[name] = []*PlanDiff{} + } else { + pl.Edges.namedPlanToPlanDiffs[name] = append(pl.Edges.namedPlanToPlanDiffs[name], edges...) } } + +// Plans is a parsable slice of Plan. +type Plans []*Plan diff --git a/ent/plan/plan.go b/ent/plan/plan.go index 38c09595..0a344e96 100755 --- a/ent/plan/plan.go +++ b/ent/plan/plan.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package plan @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -165,19 +167,189 @@ func TypeValidator(_type Type) error { } } +// OrderOption defines the ordering options for the Plan queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByStepNumber orders the results by the step_number field. +func ByStepNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStepNumber, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByBuildID orders the results by the build_id field. 
+func ByBuildID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBuildID, opts...).ToFunc() +} + +// ByPrevPlanCount orders the results by PrevPlan count. +func ByPrevPlanCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPrevPlanStep(), opts...) + } +} + +// ByPrevPlan orders the results by PrevPlan terms. +func ByPrevPlan(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPrevPlanStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByNextPlanCount orders the results by NextPlan count. +func ByNextPlanCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newNextPlanStep(), opts...) + } +} + +// ByNextPlan orders the results by NextPlan terms. +func ByNextPlan(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNextPlanStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByPlanToBuildField orders the results by PlanToBuild field. +func ByPlanToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanToTeamField orders the results by PlanToTeam field. +func ByPlanToTeamField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToTeamStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanToProvisionedNetworkField orders the results by PlanToProvisionedNetwork field. +func ByPlanToProvisionedNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToProvisionedNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanToProvisionedHostField orders the results by PlanToProvisionedHost field. +func ByPlanToProvisionedHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToProvisionedHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanToProvisioningStepField orders the results by PlanToProvisioningStep field. +func ByPlanToProvisioningStepField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToProvisioningStepStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanToStatusField orders the results by PlanToStatus field. +func ByPlanToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanToPlanDiffsCount orders the results by PlanToPlanDiffs count. +func ByPlanToPlanDiffsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPlanToPlanDiffsStep(), opts...) + } +} + +// ByPlanToPlanDiffs orders the results by PlanToPlanDiffs terms. +func ByPlanToPlanDiffs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanToPlanDiffsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newPrevPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, PrevPlanTable, PrevPlanPrimaryKey...), + ) +} +func newNextPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, NextPlanTable, NextPlanPrimaryKey...), + ) +} +func newPlanToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, PlanToBuildTable, PlanToBuildColumn), + ) +} +func newPlanToTeamStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToTeamInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, PlanToTeamTable, PlanToTeamColumn), + ) +} +func newPlanToProvisionedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToProvisionedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisionedNetworkTable, PlanToProvisionedNetworkColumn), + ) +} +func newPlanToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisionedHostTable, PlanToProvisionedHostColumn), + ) +} +func newPlanToProvisioningStepStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToProvisioningStepInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisioningStepTable, PlanToProvisioningStepColumn), + ) +} +func newPlanToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, PlanToStatusTable, PlanToStatusColumn), + ) +} +func newPlanToPlanDiffsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanToPlanDiffsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, PlanToPlanDiffsTable, PlanToPlanDiffsColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (_type Type) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(_type.String())) +func (e Type) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (_type *Type) UnmarshalGQL(val interface{}) error { +func (e *Type) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *_type = Type(str) - if err := TypeValidator(*_type); err != nil { + *e = Type(str) + if err := TypeValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Type", str) } return nil diff --git a/ent/plan/where.go b/ent/plan/where.go index 8a7eb50a..d9b17910 100755 --- a/ent/plan/where.go +++ b/ent/plan/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package plan @@ -11,334 +11,182 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. 
func IDEQ(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Plan(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Plan(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Plan(sql.FieldLTE(FieldID, id)) } // StepNumber applies equality check predicate on the "step_number" field. It's identical to StepNumberEQ. func StepNumber(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldEQ(FieldStepNumber, v)) } // BuildID applies equality check predicate on the "build_id" field. It's identical to BuildIDEQ. func BuildID(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldEQ(FieldBuildID, v)) } // StepNumberEQ applies the EQ predicate on the "step_number" field. func StepNumberEQ(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldEQ(FieldStepNumber, v)) } // StepNumberNEQ applies the NEQ predicate on the "step_number" field. 
func StepNumberNEQ(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldNEQ(FieldStepNumber, v)) } // StepNumberIn applies the In predicate on the "step_number" field. func StepNumberIn(vs ...int) predicate.Plan { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldStepNumber), v...)) - }) + return predicate.Plan(sql.FieldIn(FieldStepNumber, vs...)) } // StepNumberNotIn applies the NotIn predicate on the "step_number" field. func StepNumberNotIn(vs ...int) predicate.Plan { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldStepNumber), v...)) - }) + return predicate.Plan(sql.FieldNotIn(FieldStepNumber, vs...)) } // StepNumberGT applies the GT predicate on the "step_number" field. func StepNumberGT(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldGT(FieldStepNumber, v)) } // StepNumberGTE applies the GTE predicate on the "step_number" field. func StepNumberGTE(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldGTE(FieldStepNumber, v)) } // StepNumberLT applies the LT predicate on the "step_number" field. func StepNumberLT(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldLT(FieldStepNumber, v)) } // StepNumberLTE applies the LTE predicate on the "step_number" field. func StepNumberLTE(v int) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStepNumber), v)) - }) + return predicate.Plan(sql.FieldLTE(FieldStepNumber, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v Type) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Plan(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v Type) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.Plan(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...Type) predicate.Plan { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.Plan(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. 
func TypeNotIn(vs ...Type) predicate.Plan { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.Plan(sql.FieldNotIn(FieldType, vs...)) } // BuildIDEQ applies the EQ predicate on the "build_id" field. func BuildIDEQ(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldEQ(FieldBuildID, v)) } // BuildIDNEQ applies the NEQ predicate on the "build_id" field. func BuildIDNEQ(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldNEQ(FieldBuildID, v)) } // BuildIDIn applies the In predicate on the "build_id" field. func BuildIDIn(vs ...string) predicate.Plan { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldBuildID), v...)) - }) + return predicate.Plan(sql.FieldIn(FieldBuildID, vs...)) } // BuildIDNotIn applies the NotIn predicate on the "build_id" field. func BuildIDNotIn(vs ...string) predicate.Plan { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Plan(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldBuildID), v...)) - }) + return predicate.Plan(sql.FieldNotIn(FieldBuildID, vs...)) } // BuildIDGT applies the GT predicate on the "build_id" field. func BuildIDGT(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldGT(FieldBuildID, v)) } // BuildIDGTE applies the GTE predicate on the "build_id" field. func BuildIDGTE(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldGTE(FieldBuildID, v)) } // BuildIDLT applies the LT predicate on the "build_id" field. func BuildIDLT(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldLT(FieldBuildID, v)) } // BuildIDLTE applies the LTE predicate on the "build_id" field. func BuildIDLTE(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldLTE(FieldBuildID, v)) } // BuildIDContains applies the Contains predicate on the "build_id" field. func BuildIDContains(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldContains(FieldBuildID, v)) } // BuildIDHasPrefix applies the HasPrefix predicate on the "build_id" field. 
func BuildIDHasPrefix(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldHasPrefix(FieldBuildID, v)) } // BuildIDHasSuffix applies the HasSuffix predicate on the "build_id" field. func BuildIDHasSuffix(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldHasSuffix(FieldBuildID, v)) } // BuildIDEqualFold applies the EqualFold predicate on the "build_id" field. func BuildIDEqualFold(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldEqualFold(FieldBuildID, v)) } // BuildIDContainsFold applies the ContainsFold predicate on the "build_id" field. func BuildIDContainsFold(v string) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldBuildID), v)) - }) + return predicate.Plan(sql.FieldContainsFold(FieldBuildID, v)) } // HasPrevPlan applies the HasEdge predicate on the "PrevPlan" edge. @@ -346,7 +194,6 @@ func HasPrevPlan() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PrevPlanTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, PrevPlanTable, PrevPlanPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -356,11 +203,7 @@ func HasPrevPlan() predicate.Plan { // HasPrevPlanWith applies the HasEdge predicate on the "PrevPlan" edge with a given conditions (other predicates). func HasPrevPlanWith(preds ...predicate.Plan) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, PrevPlanTable, PrevPlanPrimaryKey...), - ) + step := newPrevPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -374,7 +217,6 @@ func HasNextPlan() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(NextPlanTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, NextPlanTable, NextPlanPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -384,11 +226,7 @@ func HasNextPlan() predicate.Plan { // HasNextPlanWith applies the HasEdge predicate on the "NextPlan" edge with a given conditions (other predicates). func HasNextPlanWith(preds ...predicate.Plan) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, NextPlanTable, NextPlanPrimaryKey...), - ) + step := newNextPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -402,7 +240,6 @@ func HasPlanToBuild() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, PlanToBuildTable, PlanToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -412,11 +249,7 @@ func HasPlanToBuild() predicate.Plan { // HasPlanToBuildWith applies the HasEdge predicate on the "PlanToBuild" edge with a given conditions (other predicates). 
func HasPlanToBuildWith(preds ...predicate.Build) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, PlanToBuildTable, PlanToBuildColumn), - ) + step := newPlanToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -430,7 +263,6 @@ func HasPlanToTeam() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToTeamTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, PlanToTeamTable, PlanToTeamColumn), ) sqlgraph.HasNeighbors(s, step) @@ -440,11 +272,7 @@ func HasPlanToTeam() predicate.Plan { // HasPlanToTeamWith applies the HasEdge predicate on the "PlanToTeam" edge with a given conditions (other predicates). func HasPlanToTeamWith(preds ...predicate.Team) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToTeamInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, PlanToTeamTable, PlanToTeamColumn), - ) + step := newPlanToTeamStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -458,7 +286,6 @@ func HasPlanToProvisionedNetwork() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToProvisionedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisionedNetworkTable, PlanToProvisionedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -468,11 +295,7 @@ func HasPlanToProvisionedNetwork() predicate.Plan { // HasPlanToProvisionedNetworkWith applies the HasEdge predicate on the "PlanToProvisionedNetwork" edge with a given conditions (other predicates). func HasPlanToProvisionedNetworkWith(preds ...predicate.ProvisionedNetwork) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToProvisionedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisionedNetworkTable, PlanToProvisionedNetworkColumn), - ) + step := newPlanToProvisionedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -486,7 +309,6 @@ func HasPlanToProvisionedHost() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisionedHostTable, PlanToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -496,11 +318,7 @@ func HasPlanToProvisionedHost() predicate.Plan { // HasPlanToProvisionedHostWith applies the HasEdge predicate on the "PlanToProvisionedHost" edge with a given conditions (other predicates). 
func HasPlanToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisionedHostTable, PlanToProvisionedHostColumn), - ) + step := newPlanToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -514,7 +332,6 @@ func HasPlanToProvisioningStep() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToProvisioningStepTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisioningStepTable, PlanToProvisioningStepColumn), ) sqlgraph.HasNeighbors(s, step) @@ -524,11 +341,7 @@ func HasPlanToProvisioningStep() predicate.Plan { // HasPlanToProvisioningStepWith applies the HasEdge predicate on the "PlanToProvisioningStep" edge with a given conditions (other predicates). func HasPlanToProvisioningStepWith(preds ...predicate.ProvisioningStep) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToProvisioningStepInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, PlanToProvisioningStepTable, PlanToProvisioningStepColumn), - ) + step := newPlanToProvisioningStepStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -542,7 +355,6 @@ func HasPlanToStatus() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, PlanToStatusTable, PlanToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -552,11 +364,7 @@ func HasPlanToStatus() predicate.Plan { // HasPlanToStatusWith applies the HasEdge predicate on the "PlanToStatus" edge with a given conditions (other predicates). func HasPlanToStatusWith(preds ...predicate.Status) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, PlanToStatusTable, PlanToStatusColumn), - ) + step := newPlanToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -570,7 +378,6 @@ func HasPlanToPlanDiffs() predicate.Plan { return predicate.Plan(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToPlanDiffsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, PlanToPlanDiffsTable, PlanToPlanDiffsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -580,11 +387,7 @@ func HasPlanToPlanDiffs() predicate.Plan { // HasPlanToPlanDiffsWith applies the HasEdge predicate on the "PlanToPlanDiffs" edge with a given conditions (other predicates). 
func HasPlanToPlanDiffsWith(preds ...predicate.PlanDiff) predicate.Plan { return predicate.Plan(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanToPlanDiffsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, PlanToPlanDiffsTable, PlanToPlanDiffsColumn), - ) + step := newPlanToPlanDiffsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -595,32 +398,15 @@ func HasPlanToPlanDiffsWith(preds ...predicate.PlanDiff) predicate.Plan { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Plan) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Plan(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Plan) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Plan(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Plan) predicate.Plan { - return predicate.Plan(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Plan(sql.NotPredicates(p)) } diff --git a/ent/plan_create.go b/ent/plan_create.go index 5aea7b5b..b90cfdc5 100755 --- a/ent/plan_create.go +++ b/ent/plan_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -217,44 +217,8 @@ func (pc *PlanCreate) Mutation() *PlanMutation { // Save creates the Plan in the database. func (pc *PlanCreate) Save(ctx context.Context) (*Plan, error) { - var ( - err error - node *Plan - ) pc.defaults() - if len(pc.hooks) == 0 { - if err = pc.check(); err != nil { - return nil, err - } - node, err = pc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = pc.check(); err != nil { - return nil, err - } - pc.mutation = mutation - if node, err = pc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(pc.hooks) - 1; i >= 0; i-- { - if pc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) } // SaveX calls Save and panics if Save returns an error. 
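Save (and, further down, Exec on the delete builder) now funnels through the generated withHooks helper instead of inlining the mutator chain, so runtime hooks keep firing as before. A sketch of a hook registration, assuming the standard generated ent/hook package (hook.PlanFunc) and an illustrative logging hook:

package planexamples // illustrative package, not part of the repository

import (
	"context"
	"log"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/hook"
)

// registerPlanLogger attaches a hook that logs every Plan mutation before
// delegating to the next mutator; Save/Exec still invoke it via withHooks.
func registerPlanLogger(client *ent.Client) {
	client.Plan.Use(func(next ent.Mutator) ent.Mutator {
		return hook.PlanFunc(func(ctx context.Context, m *ent.PlanMutation) (ent.Value, error) {
			log.Printf("plan mutation: %v", m.Op())
			return next.Mutate(ctx, m)
		})
	})
}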
@@ -310,10 +274,13 @@ func (pc *PlanCreate) check() error { } func (pc *PlanCreate) sqlSave(ctx context.Context) (*Plan, error) { + if err := pc.check(); err != nil { + return nil, err + } _node, _spec := pc.createSpec() if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -324,46 +291,30 @@ func (pc *PlanCreate) sqlSave(ctx context.Context) (*Plan, error) { return nil, err } } + pc.mutation.id = &_node.ID + pc.mutation.done = true return _node, nil } func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { var ( _node = &Plan{config: pc.config} - _spec = &sqlgraph.CreateSpec{ - Table: plan.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(plan.Table, sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID)) ) if id, ok := pc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := pc.mutation.StepNumber(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plan.FieldStepNumber, - }) + _spec.SetField(plan.FieldStepNumber, field.TypeInt, value) _node.StepNumber = value } if value, ok := pc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: plan.FieldType, - }) + _spec.SetField(plan.FieldType, field.TypeEnum, value) _node.Type = value } if value, ok := pc.mutation.BuildID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: plan.FieldBuildID, - }) + _spec.SetField(plan.FieldBuildID, field.TypeString, value) _node.BuildID = value } if nodes := pc.mutation.PrevPlanIDs(); len(nodes) > 0 { @@ -374,10 +325,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -393,10 +341,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -412,10 +357,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -432,10 +374,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -451,10 +390,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, 
- }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -470,10 +406,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -489,10 +422,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -508,10 +438,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -527,10 +454,7 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -544,11 +468,15 @@ func (pc *PlanCreate) createSpec() (*Plan, *sqlgraph.CreateSpec) { // PlanCreateBulk is the builder for creating many Plan entities in bulk. type PlanCreateBulk struct { config + err error builders []*PlanCreate } // Save creates the Plan entities in the database. func (pcb *PlanCreateBulk) Save(ctx context.Context) ([]*Plan, error) { + if pcb.err != nil { + return nil, pcb.err + } specs := make([]*sqlgraph.CreateSpec, len(pcb.builders)) nodes := make([]*Plan, len(pcb.builders)) mutators := make([]Mutator, len(pcb.builders)) @@ -565,8 +493,8 @@ func (pcb *PlanCreateBulk) Save(ctx context.Context) ([]*Plan, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation) } else { @@ -574,7 +502,7 @@ func (pcb *PlanCreateBulk) Save(ctx context.Context) ([]*Plan, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/plan_delete.go b/ent/plan_delete.go index c35c9ca9..ddb79177 100755 --- a/ent/plan_delete.go +++ b/ent/plan_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (pd *PlanDelete) Where(ps ...predicate.Plan) *PlanDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
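ConstraintError is now constructed with named fields (msg, wrap), and PlanCreateBulk short-circuits on a stored builder error before touching the database, but the error still wraps the underlying driver error, so existing checks are unaffected. A sketch, assuming a prepared *ent.PlanCreate (the wrapper name is illustrative):

package planexamples // illustrative package, not part of the repository

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
)

// saveWithConstraintCheck saves a prepared Plan builder and surfaces
// constraint violations explicitly; ent.IsConstraintError still matches
// the wrapped error produced by sqlSave.
func saveWithConstraintCheck(ctx context.Context, pc *ent.PlanCreate) (*ent.Plan, error) {
	p, err := pc.Save(ctx)
	if ent.IsConstraintError(err) {
		return nil, fmt.Errorf("plan violates a database constraint: %w", err)
	}
	return p, err
}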
func (pd *PlanDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(pd.hooks) == 0 { - affected, err = pd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - pd.mutation = mutation - affected, err = pd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(pd.hooks) - 1; i >= 0; i-- { - if pd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (pd *PlanDelete) ExecX(ctx context.Context) int { } func (pd *PlanDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: plan.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(plan.Table, sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID)) if ps := pd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (pd *PlanDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, pd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pd.mutation.done = true + return affected, err } // PlanDeleteOne is the builder for deleting a single Plan entity. @@ -92,6 +61,12 @@ type PlanDeleteOne struct { pd *PlanDelete } +// Where appends a list predicates to the PlanDelete builder. +func (pdo *PlanDeleteOne) Where(ps ...predicate.Plan) *PlanDeleteOne { + pdo.pd.mutation.Where(ps...) + return pdo +} + // Exec executes the deletion query. func (pdo *PlanDeleteOne) Exec(ctx context.Context) error { n, err := pdo.pd.Exec(ctx) @@ -107,5 +82,7 @@ func (pdo *PlanDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (pdo *PlanDeleteOne) ExecX(ctx context.Context) { - pdo.pd.ExecX(ctx) + if err := pdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/plan_query.go b/ent/plan_query.go index af3792a7..52f4ce48 100755 --- a/ent/plan_query.go +++ b/ent/plan_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -27,13 +26,10 @@ import ( // PlanQuery is the builder for querying Plan entities. type PlanQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Plan - // eager-loading edges. 
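The delete path gets the same treatment: Exec goes through withHooks, the spec comes from sqlgraph.NewDeleteSpec, constraint errors are wrapped, and PlanDeleteOne now exposes Where. A sketch of the new conditional single delete, assuming github.com/google/uuid for the ID type (the helper name is illustrative):

package planexamples // illustrative package, not part of the repository

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
	"github.com/google/uuid"
)

// deletePlanOfBuild deletes a single plan only if it belongs to the given
// build, using the Where method newly available on PlanDeleteOne.
func deletePlanOfBuild(ctx context.Context, client *ent.Client, id uuid.UUID, buildID string) error {
	return client.Plan.DeleteOneID(id).
		Where(plan.BuildIDEQ(buildID)).
		Exec(ctx)
}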
+ ctx *QueryContext + order []plan.OrderOption + inters []Interceptor + predicates []predicate.Plan withPrevPlan *PlanQuery withNextPlan *PlanQuery withPlanToBuild *BuildQuery @@ -44,6 +40,11 @@ type PlanQuery struct { withPlanToStatus *StatusQuery withPlanToPlanDiffs *PlanDiffQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Plan) error + withNamedPrevPlan map[string]*PlanQuery + withNamedNextPlan map[string]*PlanQuery + withNamedPlanToPlanDiffs map[string]*PlanDiffQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -55,34 +56,34 @@ func (pq *PlanQuery) Where(ps ...predicate.Plan) *PlanQuery { return pq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (pq *PlanQuery) Limit(limit int) *PlanQuery { - pq.limit = &limit + pq.ctx.Limit = &limit return pq } -// Offset adds an offset step to the query. +// Offset to start from. func (pq *PlanQuery) Offset(offset int) *PlanQuery { - pq.offset = &offset + pq.ctx.Offset = &offset return pq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (pq *PlanQuery) Unique(unique bool) *PlanQuery { - pq.unique = &unique + pq.ctx.Unique = &unique return pq } -// Order adds an order step to the query. -func (pq *PlanQuery) Order(o ...OrderFunc) *PlanQuery { +// Order specifies how the records should be ordered. +func (pq *PlanQuery) Order(o ...plan.OrderOption) *PlanQuery { pq.order = append(pq.order, o...) return pq } // QueryPrevPlan chains the current query on the "PrevPlan" edge. func (pq *PlanQuery) QueryPrevPlan() *PlanQuery { - query := &PlanQuery{config: pq.config} + query := (&PlanClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -104,7 +105,7 @@ func (pq *PlanQuery) QueryPrevPlan() *PlanQuery { // QueryNextPlan chains the current query on the "NextPlan" edge. func (pq *PlanQuery) QueryNextPlan() *PlanQuery { - query := &PlanQuery{config: pq.config} + query := (&PlanClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -126,7 +127,7 @@ func (pq *PlanQuery) QueryNextPlan() *PlanQuery { // QueryPlanToBuild chains the current query on the "PlanToBuild" edge. func (pq *PlanQuery) QueryPlanToBuild() *BuildQuery { - query := &BuildQuery{config: pq.config} + query := (&BuildClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -148,7 +149,7 @@ func (pq *PlanQuery) QueryPlanToBuild() *BuildQuery { // QueryPlanToTeam chains the current query on the "PlanToTeam" edge. func (pq *PlanQuery) QueryPlanToTeam() *TeamQuery { - query := &TeamQuery{config: pq.config} + query := (&TeamClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -170,7 +171,7 @@ func (pq *PlanQuery) QueryPlanToTeam() *TeamQuery { // QueryPlanToProvisionedNetwork chains the current query on the "PlanToProvisionedNetwork" edge. 
func (pq *PlanQuery) QueryPlanToProvisionedNetwork() *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: pq.config} + query := (&ProvisionedNetworkClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -192,7 +193,7 @@ func (pq *PlanQuery) QueryPlanToProvisionedNetwork() *ProvisionedNetworkQuery { // QueryPlanToProvisionedHost chains the current query on the "PlanToProvisionedHost" edge. func (pq *PlanQuery) QueryPlanToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: pq.config} + query := (&ProvisionedHostClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -214,7 +215,7 @@ func (pq *PlanQuery) QueryPlanToProvisionedHost() *ProvisionedHostQuery { // QueryPlanToProvisioningStep chains the current query on the "PlanToProvisioningStep" edge. func (pq *PlanQuery) QueryPlanToProvisioningStep() *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: pq.config} + query := (&ProvisioningStepClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -236,7 +237,7 @@ func (pq *PlanQuery) QueryPlanToProvisioningStep() *ProvisioningStepQuery { // QueryPlanToStatus chains the current query on the "PlanToStatus" edge. func (pq *PlanQuery) QueryPlanToStatus() *StatusQuery { - query := &StatusQuery{config: pq.config} + query := (&StatusClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -258,7 +259,7 @@ func (pq *PlanQuery) QueryPlanToStatus() *StatusQuery { // QueryPlanToPlanDiffs chains the current query on the "PlanToPlanDiffs" edge. func (pq *PlanQuery) QueryPlanToPlanDiffs() *PlanDiffQuery { - query := &PlanDiffQuery{config: pq.config} + query := (&PlanDiffClient{config: pq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pq.prepareQuery(ctx); err != nil { return nil, err @@ -281,7 +282,7 @@ func (pq *PlanQuery) QueryPlanToPlanDiffs() *PlanDiffQuery { // First returns the first Plan entity from the query. // Returns a *NotFoundError when no Plan was found. func (pq *PlanQuery) First(ctx context.Context) (*Plan, error) { - nodes, err := pq.Limit(1).All(ctx) + nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, "First")) if err != nil { return nil, err } @@ -304,7 +305,7 @@ func (pq *PlanQuery) FirstX(ctx context.Context) *Plan { // Returns a *NotFoundError when no Plan ID was found. func (pq *PlanQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pq.Limit(1).IDs(ctx); err != nil { + if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -327,7 +328,7 @@ func (pq *PlanQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Plan entity is found. // Returns a *NotFoundError when no Plan entities are found. 
func (pq *PlanQuery) Only(ctx context.Context) (*Plan, error) { - nodes, err := pq.Limit(2).All(ctx) + nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, "Only")) if err != nil { return nil, err } @@ -355,7 +356,7 @@ func (pq *PlanQuery) OnlyX(ctx context.Context) *Plan { // Returns a *NotFoundError when no entities are found. func (pq *PlanQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pq.Limit(2).IDs(ctx); err != nil { + if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -380,10 +381,12 @@ func (pq *PlanQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Plans. func (pq *PlanQuery) All(ctx context.Context) ([]*Plan, error) { + ctx = setContextOp(ctx, pq.ctx, "All") if err := pq.prepareQuery(ctx); err != nil { return nil, err } - return pq.sqlAll(ctx) + qr := querierAll[[]*Plan, *PlanQuery]() + return withInterceptors[[]*Plan](ctx, pq, qr, pq.inters) } // AllX is like All, but panics if an error occurs. @@ -396,9 +399,12 @@ func (pq *PlanQuery) AllX(ctx context.Context) []*Plan { } // IDs executes the query and returns a list of Plan IDs. -func (pq *PlanQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := pq.Select(plan.FieldID).Scan(ctx, &ids); err != nil { +func (pq *PlanQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if pq.ctx.Unique == nil && pq.path != nil { + pq.Unique(true) + } + ctx = setContextOp(ctx, pq.ctx, "IDs") + if err = pq.Select(plan.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -415,10 +421,11 @@ func (pq *PlanQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (pq *PlanQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pq.ctx, "Count") if err := pq.prepareQuery(ctx); err != nil { return 0, err } - return pq.sqlCount(ctx) + return withInterceptors[int](ctx, pq, querierCount[*PlanQuery](), pq.inters) } // CountX is like Count, but panics if an error occurs. @@ -432,10 +439,15 @@ func (pq *PlanQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (pq *PlanQuery) Exist(ctx context.Context) (bool, error) { - if err := pq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, pq.ctx, "Exist") + switch _, err := pq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return pq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -455,9 +467,9 @@ func (pq *PlanQuery) Clone() *PlanQuery { } return &PlanQuery{ config: pq.config, - limit: pq.limit, - offset: pq.offset, - order: append([]OrderFunc{}, pq.order...), + ctx: pq.ctx.Clone(), + order: append([]plan.OrderOption{}, pq.order...), + inters: append([]Interceptor{}, pq.inters...), predicates: append([]predicate.Plan{}, pq.predicates...), withPrevPlan: pq.withPrevPlan.Clone(), withNextPlan: pq.withNextPlan.Clone(), @@ -469,16 +481,15 @@ func (pq *PlanQuery) Clone() *PlanQuery { withPlanToStatus: pq.withPlanToStatus.Clone(), withPlanToPlanDiffs: pq.withPlanToPlanDiffs.Clone(), // clone intermediate query. 
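Limit, Offset, Unique, and the selected fields now live on the shared QueryContext, and Exist is answered with a FirstID probe instead of a COUNT. The public surface is unchanged; a sketch, assuming a connected client (the helper name is illustrative):

package planexamples // illustrative package, not part of the repository

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
)

// pageOfPlans first checks existence (now a cheap FirstID lookup) and then
// returns one page of plans for the build.
func pageOfPlans(ctx context.Context, client *ent.Client, buildID string, offset, limit int) ([]*ent.Plan, error) {
	q := client.Plan.Query().Where(plan.BuildIDEQ(buildID))
	ok, err := q.Clone().Exist(ctx)
	if err != nil || !ok {
		return nil, err
	}
	return q.Offset(offset).Limit(limit).All(ctx)
}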
- sql: pq.sql.Clone(), - path: pq.path, - unique: pq.unique, + sql: pq.sql.Clone(), + path: pq.path, } } // WithPrevPlan tells the query-builder to eager-load the nodes that are connected to // the "PrevPlan" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPrevPlan(opts ...func(*PlanQuery)) *PlanQuery { - query := &PlanQuery{config: pq.config} + query := (&PlanClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -489,7 +500,7 @@ func (pq *PlanQuery) WithPrevPlan(opts ...func(*PlanQuery)) *PlanQuery { // WithNextPlan tells the query-builder to eager-load the nodes that are connected to // the "NextPlan" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithNextPlan(opts ...func(*PlanQuery)) *PlanQuery { - query := &PlanQuery{config: pq.config} + query := (&PlanClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -500,7 +511,7 @@ func (pq *PlanQuery) WithNextPlan(opts ...func(*PlanQuery)) *PlanQuery { // WithPlanToBuild tells the query-builder to eager-load the nodes that are connected to // the "PlanToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPlanToBuild(opts ...func(*BuildQuery)) *PlanQuery { - query := &BuildQuery{config: pq.config} + query := (&BuildClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -511,7 +522,7 @@ func (pq *PlanQuery) WithPlanToBuild(opts ...func(*BuildQuery)) *PlanQuery { // WithPlanToTeam tells the query-builder to eager-load the nodes that are connected to // the "PlanToTeam" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPlanToTeam(opts ...func(*TeamQuery)) *PlanQuery { - query := &TeamQuery{config: pq.config} + query := (&TeamClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -522,7 +533,7 @@ func (pq *PlanQuery) WithPlanToTeam(opts ...func(*TeamQuery)) *PlanQuery { // WithPlanToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to // the "PlanToProvisionedNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPlanToProvisionedNetwork(opts ...func(*ProvisionedNetworkQuery)) *PlanQuery { - query := &ProvisionedNetworkQuery{config: pq.config} + query := (&ProvisionedNetworkClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -533,7 +544,7 @@ func (pq *PlanQuery) WithPlanToProvisionedNetwork(opts ...func(*ProvisionedNetwo // WithPlanToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "PlanToProvisionedHost" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPlanToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *PlanQuery { - query := &ProvisionedHostQuery{config: pq.config} + query := (&ProvisionedHostClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -544,7 +555,7 @@ func (pq *PlanQuery) WithPlanToProvisionedHost(opts ...func(*ProvisionedHostQuer // WithPlanToProvisioningStep tells the query-builder to eager-load the nodes that are connected to // the "PlanToProvisioningStep" edge. The optional arguments are used to configure the query builder of the edge. 
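The With* eager-loading builders keep their public shape; internally each edge is now populated by a dedicated load helper (loadPrevPlan, loadPlanToBuild, and so on, defined further down). A sketch of eager loading a plan with a few of its edges, assuming the generated plan.ID predicate and google/uuid IDs (the helper name is illustrative):

package planexamples // illustrative package, not part of the repository

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
	"github.com/google/uuid"
)

// planWithEdges loads a single plan together with its build, status, and up
// to ten previous plans; the edge queries run through the new load helpers.
func planWithEdges(ctx context.Context, client *ent.Client, id uuid.UUID) (*ent.Plan, error) {
	return client.Plan.Query().
		Where(plan.ID(id)).
		WithPlanToBuild().
		WithPlanToStatus().
		WithPrevPlan(func(q *ent.PlanQuery) { q.Limit(10) }).
		Only(ctx)
}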
func (pq *PlanQuery) WithPlanToProvisioningStep(opts ...func(*ProvisioningStepQuery)) *PlanQuery { - query := &ProvisioningStepQuery{config: pq.config} + query := (&ProvisioningStepClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -555,7 +566,7 @@ func (pq *PlanQuery) WithPlanToProvisioningStep(opts ...func(*ProvisioningStepQu // WithPlanToStatus tells the query-builder to eager-load the nodes that are connected to // the "PlanToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPlanToStatus(opts ...func(*StatusQuery)) *PlanQuery { - query := &StatusQuery{config: pq.config} + query := (&StatusClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -566,7 +577,7 @@ func (pq *PlanQuery) WithPlanToStatus(opts ...func(*StatusQuery)) *PlanQuery { // WithPlanToPlanDiffs tells the query-builder to eager-load the nodes that are connected to // the "PlanToPlanDiffs" edge. The optional arguments are used to configure the query builder of the edge. func (pq *PlanQuery) WithPlanToPlanDiffs(opts ...func(*PlanDiffQuery)) *PlanQuery { - query := &PlanDiffQuery{config: pq.config} + query := (&PlanDiffClient{config: pq.config}).Query() for _, opt := range opts { opt(query) } @@ -588,17 +599,13 @@ func (pq *PlanQuery) WithPlanToPlanDiffs(opts ...func(*PlanDiffQuery)) *PlanQuer // GroupBy(plan.FieldStepNumber). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (pq *PlanQuery) GroupBy(field string, fields ...string) *PlanGroupBy { - group := &PlanGroupBy{config: pq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := pq.prepareQuery(ctx); err != nil { - return nil, err - } - return pq.sqlQuery(ctx), nil - } - return group + pq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PlanGroupBy{build: pq} + grbuild.flds = &pq.ctx.Fields + grbuild.label = plan.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -613,14 +620,31 @@ func (pq *PlanQuery) GroupBy(field string, fields ...string) *PlanGroupBy { // client.Plan.Query(). // Select(plan.FieldStepNumber). // Scan(ctx, &v) -// func (pq *PlanQuery) Select(fields ...string) *PlanSelect { - pq.fields = append(pq.fields, fields...) - return &PlanSelect{PlanQuery: pq} + pq.ctx.Fields = append(pq.ctx.Fields, fields...) + sbuild := &PlanSelect{PlanQuery: pq} + sbuild.label = plan.Label + sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PlanSelect configured with the given aggregations. +func (pq *PlanQuery) Aggregate(fns ...AggregateFunc) *PlanSelect { + return pq.Select().Aggregate(fns...) 
} func (pq *PlanQuery) prepareQuery(ctx context.Context) error { - for _, f := range pq.fields { + for _, inter := range pq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pq); err != nil { + return err + } + } + } + for _, f := range pq.ctx.Fields { if !plan.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -635,7 +659,7 @@ func (pq *PlanQuery) prepareQuery(ctx context.Context) error { return nil } -func (pq *PlanQuery) sqlAll(ctx context.Context) ([]*Plan, error) { +func (pq *PlanQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Plan, error) { var ( nodes = []*Plan{} withFKs = pq.withFKs @@ -658,391 +682,460 @@ func (pq *PlanQuery) sqlAll(ctx context.Context) ([]*Plan, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, plan.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Plan).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Plan{config: pq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(pq.modifiers) > 0 { + _spec.Modifiers = pq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := pq.withPrevPlan; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Plan, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.PrevPlan = []*Plan{} + if err := pq.loadPrevPlan(ctx, query, nodes, + func(n *Plan) { n.Edges.PrevPlan = []*Plan{} }, + func(n *Plan, e *Plan) { n.Edges.PrevPlan = append(n.Edges.PrevPlan, e) }); err != nil { + return nil, err } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Plan) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: plan.PrevPlanTable, - Columns: plan.PrevPlanPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(plan.PrevPlanPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + if query := pq.withNextPlan; query != nil { + if err := pq.loadNextPlan(ctx, query, nodes, + func(n *Plan) { n.Edges.NextPlan = []*Plan{} }, + func(n *Plan, e *Plan) { n.Edges.NextPlan = append(n.Edges.NextPlan, e) }); err != nil { + return nil, err } - if 
err := sqlgraph.QueryEdges(ctx, pq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "PrevPlan": %w`, err) + } + if query := pq.withPlanToBuild; query != nil { + if err := pq.loadPlanToBuild(ctx, query, nodes, nil, + func(n *Plan, e *Build) { n.Edges.PlanToBuild = e }); err != nil { + return nil, err } - query.Where(plan.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := pq.withPlanToTeam; query != nil { + if err := pq.loadPlanToTeam(ctx, query, nodes, nil, + func(n *Plan, e *Team) { n.Edges.PlanToTeam = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "PrevPlan" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.PrevPlan = append(nodes[i].Edges.PrevPlan, n) - } + } + if query := pq.withPlanToProvisionedNetwork; query != nil { + if err := pq.loadPlanToProvisionedNetwork(ctx, query, nodes, nil, + func(n *Plan, e *ProvisionedNetwork) { n.Edges.PlanToProvisionedNetwork = e }); err != nil { + return nil, err } } - - if query := pq.withNextPlan; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Plan, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.NextPlan = []*Plan{} + if query := pq.withPlanToProvisionedHost; query != nil { + if err := pq.loadPlanToProvisionedHost(ctx, query, nodes, nil, + func(n *Plan, e *ProvisionedHost) { n.Edges.PlanToProvisionedHost = e }); err != nil { + return nil, err } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Plan) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: plan.NextPlanTable, - Columns: plan.NextPlanPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(plan.NextPlanPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + if query := pq.withPlanToProvisioningStep; query != nil { + if err := pq.loadPlanToProvisioningStep(ctx, query, nodes, nil, + func(n *Plan, e *ProvisioningStep) { n.Edges.PlanToProvisioningStep = e }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, pq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "NextPlan": %w`, err) + } + if query := pq.withPlanToStatus; query != nil { + if err := pq.loadPlanToStatus(ctx, query, nodes, nil, + func(n *Plan, e *Status) { n.Edges.PlanToStatus = e }); err != nil { + return nil, err } - query.Where(plan.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := pq.withPlanToPlanDiffs; query != nil { + if err := pq.loadPlanToPlanDiffs(ctx, query, nodes, + func(n *Plan) { n.Edges.PlanToPlanDiffs = []*PlanDiff{} }, + func(n *Plan, e *PlanDiff) { n.Edges.PlanToPlanDiffs = append(n.Edges.PlanToPlanDiffs, e) }); err != nil { return nil, err } - for _, n := range 
neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "NextPlan" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.NextPlan = append(nodes[i].Edges.NextPlan, n) - } + } + for name, query := range pq.withNamedPrevPlan { + if err := pq.loadPrevPlan(ctx, query, nodes, + func(n *Plan) { n.appendNamedPrevPlan(name) }, + func(n *Plan, e *Plan) { n.appendNamedPrevPlan(name, e) }); err != nil { + return nil, err } } - - if query := pq.withPlanToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Plan) - for i := range nodes { - if nodes[i].plan_plan_to_build == nil { - continue - } - fk := *nodes[i].plan_plan_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + for name, query := range pq.withNamedNextPlan { + if err := pq.loadNextPlan(ctx, query, nodes, + func(n *Plan) { n.appendNamedNextPlan(name) }, + func(n *Plan, e *Plan) { n.appendNamedNextPlan(name, e) }); err != nil { + return nil, err } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range pq.withNamedPlanToPlanDiffs { + if err := pq.loadPlanToPlanDiffs(ctx, query, nodes, + func(n *Plan) { n.appendNamedPlanToPlanDiffs(name) }, + func(n *Plan, e *PlanDiff) { n.appendNamedPlanToPlanDiffs(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.PlanToBuild = n - } + } + for i := range pq.loadTotal { + if err := pq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := pq.withPlanToTeam; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Plan) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - } - query.withFKs = true - query.Where(predicate.Team(func(s *sql.Selector) { - s.Where(sql.InValues(plan.PlanToTeamColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err +func (pq *PlanQuery) loadPrevPlan(ctx context.Context, query *PlanQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *Plan)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Plan) + nids := make(map[uuid.UUID]map[*Plan]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) } - for _, n := range neighbors { - fk := n.plan_plan_to_team - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_plan_to_team" is nil for node %v`, n.ID) + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(plan.PrevPlanTable) + s.Join(joinT).On(s.C(plan.FieldID), joinT.C(plan.PrevPlanPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(plan.PrevPlanPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(plan.PrevPlanPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_team" returned %v for node %v`, *fk, n.ID) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Plan]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } - node.Edges.PlanToTeam = n - } + }) + }) + neighbors, err := withInterceptors[[]*Plan](ctx, query, qr, query.inters) + if err != nil { + return err } - - if query := pq.withPlanToProvisionedNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Plan) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "PrevPlan" node returned %v`, n.ID) } - query.withFKs = true - query.Where(predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.InValues(plan.PlanToProvisionedNetworkColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + for kn := range nodes { + assign(kn, n) } - for _, n := range neighbors { - fk := n.plan_plan_to_provisioned_network - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_plan_to_provisioned_network" is nil for node %v`, n.ID) + } + return nil +} +func (pq *PlanQuery) loadNextPlan(ctx context.Context, query *PlanQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *Plan)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Plan) + nids := make(map[uuid.UUID]map[*Plan]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(plan.NextPlanTable) + s.Join(joinT).On(s.C(plan.FieldID), joinT.C(plan.NextPlanPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(plan.NextPlanPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(plan.NextPlanPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioned_network" returned %v for node %v`, *fk, n.ID) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Plan]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } - node.Edges.PlanToProvisionedNetwork = n + }) + }) + neighbors, err := withInterceptors[[]*Plan](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "NextPlan" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - if query := pq.withPlanToProvisionedHost; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Plan) + return nil +} +func (pq *PlanQuery) loadPlanToBuild(ctx context.Context, query *BuildQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Plan) + for i := range nodes { + if nodes[i].plan_plan_to_build == nil { + continue + } + fk := *nodes[i].plan_plan_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_plan_to_build" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.InValues(plan.PlanToProvisionedHostColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (pq *PlanQuery) loadPlanToTeam(ctx context.Context, query *TeamQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *Team)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Plan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Team(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(plan.PlanToTeamColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_plan_to_team + if fk == nil { + return fmt.Errorf(`foreign-key "plan_plan_to_team" is nil for node %v`, n.ID) } - for _, n := range neighbors { - fk := n.plan_plan_to_provisioned_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_plan_to_provisioned_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, 
fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioned_host" returned %v for node %v`, *fk, n.ID) - } - node.Edges.PlanToProvisionedHost = n + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_plan_to_team" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - if query := pq.withPlanToProvisioningStep; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Plan) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + return nil +} +func (pq *PlanQuery) loadPlanToProvisionedNetwork(ctx context.Context, query *ProvisionedNetworkQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *ProvisionedNetwork)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Plan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.ProvisionedNetwork(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(plan.PlanToProvisionedNetworkColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_plan_to_provisioned_network + if fk == nil { + return fmt.Errorf(`foreign-key "plan_plan_to_provisioned_network" is nil for node %v`, n.ID) } - query.withFKs = true - query.Where(predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.InValues(plan.PlanToProvisioningStepColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_plan_to_provisioned_network" returned %v for node %v`, *fk, n.ID) } - for _, n := range neighbors { - fk := n.plan_plan_to_provisioning_step - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_plan_to_provisioning_step" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioning_step" returned %v for node %v`, *fk, n.ID) - } - node.Edges.PlanToProvisioningStep = n + assign(node, n) + } + return nil +} +func (pq *PlanQuery) loadPlanToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *ProvisionedHost)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Plan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.ProvisionedHost(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(plan.PlanToProvisionedHostColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_plan_to_provisioned_host + if fk == nil { + return fmt.Errorf(`foreign-key "plan_plan_to_provisioned_host" is nil for node %v`, n.ID) } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_plan_to_provisioned_host" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - - if query := pq.withPlanToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Plan) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + return nil +} +func (pq *PlanQuery) loadPlanToProvisioningStep(ctx context.Context, query *ProvisioningStepQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, 
*ProvisioningStep)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Plan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.ProvisioningStep(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(plan.PlanToProvisioningStepColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_plan_to_provisioning_step + if fk == nil { + return fmt.Errorf(`foreign-key "plan_plan_to_provisioning_step" is nil for node %v`, n.ID) } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(plan.PlanToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_plan_to_provisioning_step" returned %v for node %v`, *fk, n.ID) } - for _, n := range neighbors { - fk := n.plan_plan_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_plan_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.PlanToStatus = n + assign(node, n) + } + return nil +} +func (pq *PlanQuery) loadPlanToStatus(ctx context.Context, query *StatusQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Plan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(plan.PlanToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_plan_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "plan_plan_to_status" is nil for node %v`, n.ID) } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_plan_to_status" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - - if query := pq.withPlanToPlanDiffs; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Plan) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.PlanToPlanDiffs = []*PlanDiff{} + return nil +} +func (pq *PlanQuery) loadPlanToPlanDiffs(ctx context.Context, query *PlanDiffQuery, nodes []*Plan, init func(*Plan), assign func(*Plan, *PlanDiff)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Plan) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - query.withFKs = true - query.Where(predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.InValues(plan.PlanToPlanDiffsColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + query.withFKs = true + query.Where(predicate.PlanDiff(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(plan.PlanToPlanDiffsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.plan_diff_plan_diff_to_plan + if fk == nil { + return fmt.Errorf(`foreign-key "plan_diff_plan_diff_to_plan" is nil for node 
%v`, n.ID) } - for _, n := range neighbors { - fk := n.plan_diff_plan_diff_to_plan - if fk == nil { - return nil, fmt.Errorf(`foreign-key "plan_diff_plan_diff_to_plan" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_diff_plan_diff_to_plan" returned %v for node %v`, *fk, n.ID) - } - node.Edges.PlanToPlanDiffs = append(node.Edges.PlanToPlanDiffs, n) + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "plan_diff_plan_diff_to_plan" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil } func (pq *PlanQuery) sqlCount(ctx context.Context) (int, error) { _spec := pq.querySpec() - _spec.Node.Columns = pq.fields - if len(pq.fields) > 0 { - _spec.Unique = pq.unique != nil && *pq.unique + if len(pq.modifiers) > 0 { + _spec.Modifiers = pq.modifiers } - return sqlgraph.CountNodes(ctx, pq.driver, _spec) -} - -func (pq *PlanQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := pq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = pq.ctx.Fields + if len(pq.ctx.Fields) > 0 { + _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, pq.driver, _spec) } func (pq *PlanQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: plan.Table, - Columns: plan.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, - }, - From: pq.sql, - Unique: true, - } - if unique := pq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(plan.Table, plan.Columns, sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID)) + _spec.From = pq.sql + if unique := pq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if pq.path != nil { + _spec.Unique = true } - if fields := pq.fields; len(fields) > 0 { + if fields := pq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, plan.FieldID) for i := range fields { @@ -1058,10 +1151,10 @@ func (pq *PlanQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := pq.limit; limit != nil { + if limit := pq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := pq.offset; offset != nil { + if offset := pq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := pq.order; len(ps) > 0 { @@ -1077,7 +1170,7 @@ func (pq *PlanQuery) querySpec() *sqlgraph.QuerySpec { func (pq *PlanQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(pq.driver.Dialect()) t1 := builder.Table(plan.Table) - columns := pq.fields + columns := pq.ctx.Fields if len(columns) == 0 { columns = plan.Columns } @@ -1086,7 +1179,7 @@ func (pq *PlanQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = pq.sql selector.Select(selector.Columns(columns...)...) } - if pq.unique != nil && *pq.unique { + if pq.ctx.Unique != nil && *pq.ctx.Unique { selector.Distinct() } for _, p := range pq.predicates { @@ -1095,498 +1188,142 @@ func (pq *PlanQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range pq.order { p(selector) } - if offset := pq.offset; offset != nil { + if offset := pq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
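Note on the hunks above: the monolithic eager-loading block in sqlAll is split into per-edge load helpers (loadPlanToProvisioningStep, loadPlanToStatus, loadPlanToPlanDiffs, ...), each taking init/assign callbacks, and the FK predicates now qualify the column through s.C(...). Caller-side usage is unchanged; a minimal sketch against the generated laforge client (the function name and wiring are illustrative, not part of the patch):

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// plansWithEdges eager-loads two edges; each With* option below is now
// satisfied internally by one of the load<Edge> helpers shown above.
func plansWithEdges(ctx context.Context, client *ent.Client) ([]*ent.Plan, error) {
	return client.Plan.Query().
		WithPlanToStatus().
		WithPlanToPlanDiffs().
		All(ctx)
}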
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := pq.limit; limit != nil { + if limit := pq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// PlanGroupBy is the group-by builder for Plan entities. -type PlanGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (pgb *PlanGroupBy) Aggregate(fns ...AggregateFunc) *PlanGroupBy { - pgb.fns = append(pgb.fns, fns...) - return pgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (pgb *PlanGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := pgb.path(ctx) - if err != nil { - return err - } - pgb.sql = query - return pgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (pgb *PlanGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := pgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(pgb.fields) > 1 { - return nil, errors.New("ent: PlanGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := pgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (pgb *PlanGroupBy) StringsX(ctx context.Context) []string { - v, err := pgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = pgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (pgb *PlanGroupBy) StringX(ctx context.Context) string { - v, err := pgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(pgb.fields) > 1 { - return nil, errors.New("ent: PlanGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := pgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (pgb *PlanGroupBy) IntsX(ctx context.Context) []int { - v, err := pgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (pgb *PlanGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = pgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanGroupBy.Ints returned %d results when one was expected", len(v)) +// WithNamedPrevPlan tells the query-builder to eager-load the nodes that are connected to the "PrevPlan" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (pq *PlanQuery) WithNamedPrevPlan(name string, opts ...func(*PlanQuery)) *PlanQuery { + query := (&PlanClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// IntX is like Int, but panics if an error occurs. -func (pgb *PlanGroupBy) IntX(ctx context.Context) int { - v, err := pgb.Int(ctx) - if err != nil { - panic(err) + if pq.withNamedPrevPlan == nil { + pq.withNamedPrevPlan = make(map[string]*PlanQuery) } - return v + pq.withNamedPrevPlan[name] = query + return pq } -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(pgb.fields) > 1 { - return nil, errors.New("ent: PlanGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := pgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedNextPlan tells the query-builder to eager-load the nodes that are connected to the "NextPlan" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (pq *PlanQuery) WithNamedNextPlan(name string, opts ...func(*PlanQuery)) *PlanQuery { + query := (&PlanClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (pgb *PlanGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := pgb.Float64s(ctx) - if err != nil { - panic(err) + if pq.withNamedNextPlan == nil { + pq.withNamedNextPlan = make(map[string]*PlanQuery) } - return v + pq.withNamedNextPlan[name] = query + return pq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = pgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedPlanToPlanDiffs tells the query-builder to eager-load the nodes that are connected to the "PlanToPlanDiffs" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (pq *PlanQuery) WithNamedPlanToPlanDiffs(name string, opts ...func(*PlanDiffQuery)) *PlanQuery { + query := (&PlanDiffClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
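Note: alongside the removals here, PlanQuery gains WithNamedPrevPlan / WithNamedNextPlan / WithNamedPlanToPlanDiffs for named eager-loading. A sketch of the query side only; the alias strings and the StepNumberGT filter are illustrative, and reading the results back relies on the matching Named* accessors on *ent.Plan, which are assumed to be generated alongside and are not shown in this hunk:

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
)

// plansWithSplitNeighbors loads the same entity with two independently
// configured edge loads, keyed by alias name.
func plansWithSplitNeighbors(ctx context.Context, client *ent.Client) ([]*ent.Plan, error) {
	return client.Plan.Query().
		WithNamedPrevPlan("all_prev").
		WithNamedNextPlan("late_next", func(q *ent.PlanQuery) {
			q.Where(plan.StepNumberGT(5)) // filter applies only to this named load
		}).
		All(ctx)
}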
-func (pgb *PlanGroupBy) Float64X(ctx context.Context) float64 { - v, err := pgb.Float64(ctx) - if err != nil { - panic(err) + if pq.withNamedPlanToPlanDiffs == nil { + pq.withNamedPlanToPlanDiffs = make(map[string]*PlanDiffQuery) } - return v + pq.withNamedPlanToPlanDiffs[name] = query + return pq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(pgb.fields) > 1 { - return nil, errors.New("ent: PlanGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := pgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// PlanGroupBy is the group-by builder for Plan entities. +type PlanGroupBy struct { + selector + build *PlanQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (pgb *PlanGroupBy) BoolsX(ctx context.Context) []bool { - v, err := pgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (pgb *PlanGroupBy) Aggregate(fns ...AggregateFunc) *PlanGroupBy { + pgb.fns = append(pgb.fns, fns...) + return pgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pgb *PlanGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = pgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (pgb *PlanGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pgb.build.ctx, "GroupBy") + if err := pgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*PlanQuery, *PlanGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (pgb *PlanGroupBy) BoolX(ctx context.Context) bool { - v, err := pgb.Bool(ctx) - if err != nil { - panic(err) +func (pgb *PlanGroupBy) sqlScan(ctx context.Context, root *PlanQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pgb.fns)) + for _, fn := range pgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (pgb *PlanGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range pgb.fields { - if !plan.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) + for _, f := range *pgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := pgb.sqlQuery() + selector.GroupBy(selector.Columns(*pgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := pgb.driver.Query(ctx, query, args, rows); err != nil { + if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (pgb *PlanGroupBy) sqlQuery() *sql.Selector { - selector := pgb.sql.Select() - aggregation := make([]string, 0, len(pgb.fns)) - for _, fn := range pgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(pgb.fields)+len(pgb.fns)) - for _, f := range pgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(pgb.fields...)...) -} - // PlanSelect is the builder for selecting fields of Plan entities. type PlanSelect struct { *PlanQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ps *PlanSelect) Aggregate(fns ...AggregateFunc) *PlanSelect { + ps.fns = append(ps.fns, fns...) + return ps } // Scan applies the selector query and scans the result into the given value. -func (ps *PlanSelect) Scan(ctx context.Context, v interface{}) error { +func (ps *PlanSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ps.ctx, "Select") if err := ps.prepareQuery(ctx); err != nil { return err } - ps.sql = ps.PlanQuery.sqlQuery(ctx) - return ps.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ps *PlanSelect) ScanX(ctx context.Context, v interface{}) { - if err := ps.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ps *PlanSelect) Strings(ctx context.Context) ([]string, error) { - if len(ps.fields) > 1 { - return nil, errors.New("ent: PlanSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ps *PlanSelect) StringsX(ctx context.Context) []string { - v, err := ps.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ps *PlanSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ps.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ps *PlanSelect) StringX(ctx context.Context) string { - v, err := ps.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
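Note: the typed PlanGroupBy and PlanSelect helpers (Strings, Ints, Float64s, Bools and their *X variants) are dropped in favor of the shared selector embed and a generic Scan that runs through interceptors. A minimal sketch of the equivalent caller-side pattern, assuming the generated ent.Count aggregate and json struct tags for scanning (both standard in generated ent code, not shown in this hunk):

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
)

// planTypeCount receives one row per plan type; "count" is the default
// alias produced by the Count aggregate.
type planTypeCount struct {
	Type  string `json:"type"`
	Count int    `json:"count"`
}

// plansPerType groups plans by their type and counts them, scanning into a
// caller-defined slice instead of the removed typed getters.
func plansPerType(ctx context.Context, client *ent.Client) ([]planTypeCount, error) {
	var rows []planTypeCount
	err := client.Plan.Query().
		GroupBy(plan.FieldType).
		Aggregate(ent.Count()).
		Scan(ctx, &rows)
	return rows, err
}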
-func (ps *PlanSelect) Ints(ctx context.Context) ([]int, error) { - if len(ps.fields) > 1 { - return nil, errors.New("ent: PlanSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ps *PlanSelect) IntsX(ctx context.Context) []int { - v, err := ps.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ps *PlanSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ps.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanSelect.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*PlanQuery, *PlanSelect](ctx, ps.PlanQuery, ps, ps.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (ps *PlanSelect) IntX(ctx context.Context) int { - v, err := ps.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ps *PlanSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ps.fields) > 1 { - return nil, errors.New("ent: PlanSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ps *PlanSelect) Float64sX(ctx context.Context) []float64 { - v, err := ps.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ps *PlanSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ps.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ps *PlanSelect) Float64X(ctx context.Context) float64 { - v, err := ps.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ps *PlanSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ps.fields) > 1 { - return nil, errors.New("ent: PlanSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ps.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ps *PlanSelect) BoolsX(ctx context.Context) []bool { - v, err := ps.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (ps *PlanSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ps.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plan.Label} - default: - err = fmt.Errorf("ent: PlanSelect.Bools returned %d results when one was expected", len(v)) +func (ps *PlanSelect) sqlScan(ctx context.Context, root *PlanQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ps.fns)) + for _, fn := range ps.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ps *PlanSelect) BoolX(ctx context.Context) bool { - v, err := ps.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ps *PlanSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ps.sql.Query() + query, args := selector.Query() if err := ps.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/plan_update.go b/ent/plan_update.go index f5a5e587..7de0f002 100755 --- a/ent/plan_update.go +++ b/ent/plan_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -42,6 +42,14 @@ func (pu *PlanUpdate) SetStepNumber(i int) *PlanUpdate { return pu } +// SetNillableStepNumber sets the "step_number" field if the given value is not nil. +func (pu *PlanUpdate) SetNillableStepNumber(i *int) *PlanUpdate { + if i != nil { + pu.SetStepNumber(*i) + } + return pu +} + // AddStepNumber adds i to the "step_number" field. func (pu *PlanUpdate) AddStepNumber(i int) *PlanUpdate { pu.mutation.AddStepNumber(i) @@ -54,12 +62,28 @@ func (pu *PlanUpdate) SetType(pl plan.Type) *PlanUpdate { return pu } +// SetNillableType sets the "type" field if the given value is not nil. +func (pu *PlanUpdate) SetNillableType(pl *plan.Type) *PlanUpdate { + if pl != nil { + pu.SetType(*pl) + } + return pu +} + // SetBuildID sets the "build_id" field. func (pu *PlanUpdate) SetBuildID(s string) *PlanUpdate { pu.mutation.SetBuildID(s) return pu } +// SetNillableBuildID sets the "build_id" field if the given value is not nil. +func (pu *PlanUpdate) SetNillableBuildID(s *string) *PlanUpdate { + if s != nil { + pu.SetBuildID(*s) + } + return pu +} + // AddPrevPlanIDs adds the "PrevPlan" edge to the Plan entity by IDs. func (pu *PlanUpdate) AddPrevPlanIDs(ids ...uuid.UUID) *PlanUpdate { pu.mutation.AddPrevPlanIDs(ids...) @@ -317,40 +341,7 @@ func (pu *PlanUpdate) RemovePlanToPlanDiffs(p ...*PlanDiff) *PlanUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
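Note: in the plan_update.go diff that follows, Save collapses the hand-rolled hook loop into the generated withHooks helper, and SetNillable* setters are added for step_number, type, and build_id. A caller-side sketch of the nillable setters, useful for PATCH-style inputs; the function name and the StepNumberGT predicate are illustrative choices, not from the patch:

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
)

// patchPlans applies only the fields whose pointers are non-nil; a nil
// buildID leaves build_id untouched. Returns the number of updated rows.
func patchPlans(ctx context.Context, client *ent.Client, step *int, buildID *string) (int, error) {
	return client.Plan.Update().
		Where(plan.StepNumberGT(0)).
		SetNillableStepNumber(step).
		SetNillableBuildID(buildID).
		Save(ctx)
}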
func (pu *PlanUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(pu.hooks) == 0 { - if err = pu.check(); err != nil { - return 0, err - } - affected, err = pu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = pu.check(); err != nil { - return 0, err - } - pu.mutation = mutation - affected, err = pu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(pu.hooks) - 1; i >= 0; i-- { - if pu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -389,16 +380,10 @@ func (pu *PlanUpdate) check() error { } func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: plan.Table, - Columns: plan.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, - }, + if err := pu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(plan.Table, plan.Columns, sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID)) if ps := pu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -407,32 +392,16 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := pu.mutation.StepNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plan.FieldStepNumber, - }) + _spec.SetField(plan.FieldStepNumber, field.TypeInt, value) } if value, ok := pu.mutation.AddedStepNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plan.FieldStepNumber, - }) + _spec.AddField(plan.FieldStepNumber, field.TypeInt, value) } if value, ok := pu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: plan.FieldType, - }) + _spec.SetField(plan.FieldType, field.TypeEnum, value) } if value, ok := pu.mutation.BuildID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: plan.FieldBuildID, - }) + _spec.SetField(plan.FieldBuildID, field.TypeString, value) } if pu.mutation.PrevPlanCleared() { edge := &sqlgraph.EdgeSpec{ @@ -442,10 +411,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -458,10 +424,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -477,10 +440,7 @@ func (pu *PlanUpdate) sqlSave(ctx 
context.Context) (n int, err error) { Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -496,10 +456,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -512,10 +469,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -531,10 +485,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -550,10 +501,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -566,10 +514,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -585,10 +530,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -601,10 +543,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -620,10 +559,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -636,10 +572,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, 
- }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -655,10 +588,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -671,10 +601,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -690,10 +617,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -706,10 +630,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -725,10 +646,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -741,10 +659,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -760,10 +675,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -776,10 +688,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -795,10 +704,7 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -810,10 +716,11 @@ func (pu *PlanUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{plan.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + pu.mutation.done = true return n, nil } @@ -832,6 +739,14 @@ func (puo *PlanUpdateOne) SetStepNumber(i int) *PlanUpdateOne { return puo } +// SetNillableStepNumber sets the "step_number" field if the given value is not nil. +func (puo *PlanUpdateOne) SetNillableStepNumber(i *int) *PlanUpdateOne { + if i != nil { + puo.SetStepNumber(*i) + } + return puo +} + // AddStepNumber adds i to the "step_number" field. func (puo *PlanUpdateOne) AddStepNumber(i int) *PlanUpdateOne { puo.mutation.AddStepNumber(i) @@ -844,12 +759,28 @@ func (puo *PlanUpdateOne) SetType(pl plan.Type) *PlanUpdateOne { return puo } +// SetNillableType sets the "type" field if the given value is not nil. +func (puo *PlanUpdateOne) SetNillableType(pl *plan.Type) *PlanUpdateOne { + if pl != nil { + puo.SetType(*pl) + } + return puo +} + // SetBuildID sets the "build_id" field. func (puo *PlanUpdateOne) SetBuildID(s string) *PlanUpdateOne { puo.mutation.SetBuildID(s) return puo } +// SetNillableBuildID sets the "build_id" field if the given value is not nil. +func (puo *PlanUpdateOne) SetNillableBuildID(s *string) *PlanUpdateOne { + if s != nil { + puo.SetBuildID(*s) + } + return puo +} + // AddPrevPlanIDs adds the "PrevPlan" edge to the Plan entity by IDs. func (puo *PlanUpdateOne) AddPrevPlanIDs(ids ...uuid.UUID) *PlanUpdateOne { puo.mutation.AddPrevPlanIDs(ids...) @@ -1105,6 +1036,12 @@ func (puo *PlanUpdateOne) RemovePlanToPlanDiffs(p ...*PlanDiff) *PlanUpdateOne { return puo.RemovePlanToPlanDiffIDs(ids...) } +// Where appends a list predicates to the PlanUpdate builder. +func (puo *PlanUpdateOne) Where(ps ...predicate.Plan) *PlanUpdateOne { + puo.mutation.Where(ps...) + return puo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (puo *PlanUpdateOne) Select(field string, fields ...string) *PlanUpdateOne { @@ -1114,40 +1051,7 @@ func (puo *PlanUpdateOne) Select(field string, fields ...string) *PlanUpdateOne // Save executes the query and returns the updated Plan entity. func (puo *PlanUpdateOne) Save(ctx context.Context) (*Plan, error) { - var ( - err error - node *Plan - ) - if len(puo.hooks) == 0 { - if err = puo.check(); err != nil { - return nil, err - } - node, err = puo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = puo.check(); err != nil { - return nil, err - } - puo.mutation = mutation - node, err = puo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(puo.hooks) - 1; i >= 0; i-- { - if puo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = puo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, puo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) } // SaveX is like Save, but panics if an error occurs. 
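Note on the hunks above: UpdateSpec and EdgeTarget construction now goes through sqlgraph.NewUpdateSpec / sqlgraph.NewFieldSpec with SetField/AddField, and PlanUpdateOne gains a Where method. A sketch of using that Where as a guard on a single-row update; if the predicate filters the row out, Save is expected to surface a NotFoundError (standard ent behaviour, not shown in this hunk). The function name is illustrative:

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plan"
)

// bumpStepIfType increments step_number only while the plan still has the
// given type, using the new PlanUpdateOne.Where guard.
func bumpStepIfType(ctx context.Context, client *ent.Client, p *ent.Plan, t plan.Type) (*ent.Plan, error) {
	return client.Plan.UpdateOne(p).
		Where(plan.TypeEQ(t)).
		AddStepNumber(1).
		Save(ctx)
}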
@@ -1186,16 +1090,10 @@ func (puo *PlanUpdateOne) check() error { } func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: plan.Table, - Columns: plan.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, - }, + if err := puo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(plan.Table, plan.Columns, sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID)) id, ok := puo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Plan.id" for update`)} @@ -1221,32 +1119,16 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) } } if value, ok := puo.mutation.StepNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plan.FieldStepNumber, - }) + _spec.SetField(plan.FieldStepNumber, field.TypeInt, value) } if value, ok := puo.mutation.AddedStepNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plan.FieldStepNumber, - }) + _spec.AddField(plan.FieldStepNumber, field.TypeInt, value) } if value, ok := puo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: plan.FieldType, - }) + _spec.SetField(plan.FieldType, field.TypeEnum, value) } if value, ok := puo.mutation.BuildID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: plan.FieldBuildID, - }) + _spec.SetField(plan.FieldBuildID, field.TypeString, value) } if puo.mutation.PrevPlanCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1256,10 +1138,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1272,10 +1151,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1291,10 +1167,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: plan.PrevPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1310,10 +1183,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1326,10 +1196,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: 
plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1345,10 +1212,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: plan.NextPlanPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1364,10 +1228,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1380,10 +1241,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1399,10 +1257,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1415,10 +1270,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1434,10 +1286,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1450,10 +1299,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1469,10 +1315,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1485,10 +1328,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, 
+ IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1504,10 +1344,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1520,10 +1357,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1539,10 +1373,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1555,10 +1386,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1574,10 +1402,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1590,10 +1415,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1609,10 +1431,7 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) Columns: []string{plan.PlanToPlanDiffsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1627,9 +1446,10 @@ func (puo *PlanUpdateOne) sqlSave(ctx context.Context) (_node *Plan, err error) if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{plan.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + puo.mutation.done = true return _node, nil } diff --git a/ent/plandiff.go b/ent/plandiff.go index 23af4db7..15dc955c 100755 --- a/ent/plandiff.go +++ b/ent/plandiff.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/buildcommit" "github.com/gen0cide/laforge/ent/plan" @@ -26,14 +27,16 @@ type PlanDiff struct { // The values are being populated by the PlanDiffQuery when eager-loading is set. Edges PlanDiffEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // PlanDiffToBuildCommit holds the value of the PlanDiffToBuildCommit edge. HCLPlanDiffToBuildCommit *BuildCommit `json:"PlanDiffToBuildCommit,omitempty"` // PlanDiffToPlan holds the value of the PlanDiffToPlan edge. HCLPlanDiffToPlan *Plan `json:"PlanDiffToPlan,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ plan_diff_plan_diff_to_build_commit *uuid.UUID plan_diff_plan_diff_to_plan *uuid.UUID + selectValues sql.SelectValues } // PlanDiffEdges holds the relations/edges for other nodes in the graph. @@ -45,6 +48,8 @@ type PlanDiffEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int } // PlanDiffToBuildCommitOrErr returns the PlanDiffToBuildCommit value or an error if the edge @@ -52,8 +57,7 @@ type PlanDiffEdges struct { func (e PlanDiffEdges) PlanDiffToBuildCommitOrErr() (*BuildCommit, error) { if e.loadedTypes[0] { if e.PlanDiffToBuildCommit == nil { - // The edge PlanDiffToBuildCommit was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: buildcommit.Label} } return e.PlanDiffToBuildCommit, nil @@ -66,8 +70,7 @@ func (e PlanDiffEdges) PlanDiffToBuildCommitOrErr() (*BuildCommit, error) { func (e PlanDiffEdges) PlanDiffToPlanOrErr() (*Plan, error) { if e.loadedTypes[1] { if e.PlanDiffToPlan == nil { - // The edge PlanDiffToPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.PlanDiffToPlan, nil @@ -76,8 +79,8 @@ func (e PlanDiffEdges) PlanDiffToPlanOrErr() (*Plan, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*PlanDiff) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*PlanDiff) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case plandiff.FieldRevision: @@ -91,7 +94,7 @@ func (*PlanDiff) scanValues(columns []string) ([]interface{}, error) { case plandiff.ForeignKeys[1]: // plan_diff_plan_diff_to_plan values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type PlanDiff", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -99,7 +102,7 @@ func (*PlanDiff) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the PlanDiff fields. 
-func (pd *PlanDiff) assignValues(columns []string, values []interface{}) error { +func (pd *PlanDiff) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -137,36 +140,44 @@ func (pd *PlanDiff) assignValues(columns []string, values []interface{}) error { pd.plan_diff_plan_diff_to_plan = new(uuid.UUID) *pd.plan_diff_plan_diff_to_plan = *value.S.(*uuid.UUID) } + default: + pd.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the PlanDiff. +// This includes values selected through modifiers, order, etc. +func (pd *PlanDiff) Value(name string) (ent.Value, error) { + return pd.selectValues.Get(name) +} + // QueryPlanDiffToBuildCommit queries the "PlanDiffToBuildCommit" edge of the PlanDiff entity. func (pd *PlanDiff) QueryPlanDiffToBuildCommit() *BuildCommitQuery { - return (&PlanDiffClient{config: pd.config}).QueryPlanDiffToBuildCommit(pd) + return NewPlanDiffClient(pd.config).QueryPlanDiffToBuildCommit(pd) } // QueryPlanDiffToPlan queries the "PlanDiffToPlan" edge of the PlanDiff entity. func (pd *PlanDiff) QueryPlanDiffToPlan() *PlanQuery { - return (&PlanDiffClient{config: pd.config}).QueryPlanDiffToPlan(pd) + return NewPlanDiffClient(pd.config).QueryPlanDiffToPlan(pd) } // Update returns a builder for updating this PlanDiff. // Note that you need to call PlanDiff.Unwrap() before calling this method if this PlanDiff // was returned from a transaction, and the transaction was committed or rolled back. func (pd *PlanDiff) Update() *PlanDiffUpdateOne { - return (&PlanDiffClient{config: pd.config}).UpdateOne(pd) + return NewPlanDiffClient(pd.config).UpdateOne(pd) } // Unwrap unwraps the PlanDiff entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (pd *PlanDiff) Unwrap() *PlanDiff { - tx, ok := pd.config.driver.(*txDriver) + _tx, ok := pd.config.driver.(*txDriver) if !ok { panic("ent: PlanDiff is not a transactional entity") } - pd.config.driver = tx.drv + pd.config.driver = _tx.drv return pd } @@ -174,10 +185,11 @@ func (pd *PlanDiff) Unwrap() *PlanDiff { func (pd *PlanDiff) String() string { var builder strings.Builder builder.WriteString("PlanDiff(") - builder.WriteString(fmt.Sprintf("id=%v", pd.ID)) - builder.WriteString(", revision=") + builder.WriteString(fmt.Sprintf("id=%v, ", pd.ID)) + builder.WriteString("revision=") builder.WriteString(fmt.Sprintf("%v", pd.Revision)) - builder.WriteString(", new_state=") + builder.WriteString(", ") + builder.WriteString("new_state=") builder.WriteString(fmt.Sprintf("%v", pd.NewState)) builder.WriteByte(')') return builder.String() @@ -185,9 +197,3 @@ func (pd *PlanDiff) String() string { // PlanDiffs is a parsable slice of PlanDiff. type PlanDiffs []*PlanDiff - -func (pd PlanDiffs) config(cfg config) { - for _i := range pd { - pd[_i].config = cfg - } -} diff --git a/ent/plandiff/plandiff.go b/ent/plandiff/plandiff.go index 6417fd0f..e7c8ddb2 100755 --- a/ent/plandiff/plandiff.go +++ b/ent/plandiff/plandiff.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package plandiff @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -106,19 +108,65 @@ func NewStateValidator(ns NewState) error { } } +// OrderOption defines the ordering options for the PlanDiff queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByRevision orders the results by the revision field. +func ByRevision(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevision, opts...).ToFunc() +} + +// ByNewState orders the results by the new_state field. +func ByNewState(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNewState, opts...).ToFunc() +} + +// ByPlanDiffToBuildCommitField orders the results by PlanDiffToBuildCommit field. +func ByPlanDiffToBuildCommitField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanDiffToBuildCommitStep(), sql.OrderByField(field, opts...)) + } +} + +// ByPlanDiffToPlanField orders the results by PlanDiffToPlan field. +func ByPlanDiffToPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPlanDiffToPlanStep(), sql.OrderByField(field, opts...)) + } +} +func newPlanDiffToBuildCommitStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanDiffToBuildCommitInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, PlanDiffToBuildCommitTable, PlanDiffToBuildCommitColumn), + ) +} +func newPlanDiffToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PlanDiffToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, PlanDiffToPlanTable, PlanDiffToPlanColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (ns NewState) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(ns.String())) +func (e NewState) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (ns *NewState) UnmarshalGQL(val interface{}) error { +func (e *NewState) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *ns = NewState(str) - if err := NewStateValidator(*ns); err != nil { + *e = NewState(str) + if err := NewStateValidator(*e); err != nil { return fmt.Errorf("%s is not a valid NewState", str) } return nil diff --git a/ent/plandiff/where.go b/ent/plandiff/where.go index 9b652b3e..d177a7fd 100755 --- a/ent/plandiff/where.go +++ b/ent/plandiff/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package plandiff @@ -11,216 +11,112 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
func IDNEQ(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.PlanDiff(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.PlanDiff(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.PlanDiff(sql.FieldLTE(FieldID, id)) } // Revision applies equality check predicate on the "revision" field. It's identical to RevisionEQ. func Revision(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldEQ(FieldRevision, v)) } // RevisionEQ applies the EQ predicate on the "revision" field. func RevisionEQ(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldEQ(FieldRevision, v)) } // RevisionNEQ applies the NEQ predicate on the "revision" field. func RevisionNEQ(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldNEQ(FieldRevision, v)) } // RevisionIn applies the In predicate on the "revision" field. func RevisionIn(vs ...int) predicate.PlanDiff { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.PlanDiff(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRevision), v...)) - }) + return predicate.PlanDiff(sql.FieldIn(FieldRevision, vs...)) } // RevisionNotIn applies the NotIn predicate on the "revision" field. func RevisionNotIn(vs ...int) predicate.PlanDiff { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.PlanDiff(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRevision), v...)) - }) + return predicate.PlanDiff(sql.FieldNotIn(FieldRevision, vs...)) } // RevisionGT applies the GT predicate on the "revision" field. func RevisionGT(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldGT(FieldRevision, v)) } // RevisionGTE applies the GTE predicate on the "revision" field. func RevisionGTE(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldGTE(FieldRevision, v)) } // RevisionLT applies the LT predicate on the "revision" field. func RevisionLT(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldLT(FieldRevision, v)) } // RevisionLTE applies the LTE predicate on the "revision" field. func RevisionLTE(v int) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRevision), v)) - }) + return predicate.PlanDiff(sql.FieldLTE(FieldRevision, v)) } // NewStateEQ applies the EQ predicate on the "new_state" field. func NewStateEQ(v NewState) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldNewState), v)) - }) + return predicate.PlanDiff(sql.FieldEQ(FieldNewState, v)) } // NewStateNEQ applies the NEQ predicate on the "new_state" field. func NewStateNEQ(v NewState) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldNewState), v)) - }) + return predicate.PlanDiff(sql.FieldNEQ(FieldNewState, v)) } // NewStateIn applies the In predicate on the "new_state" field. func NewStateIn(vs ...NewState) predicate.PlanDiff { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.PlanDiff(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldNewState), v...)) - }) + return predicate.PlanDiff(sql.FieldIn(FieldNewState, vs...)) } // NewStateNotIn applies the NotIn predicate on the "new_state" field. func NewStateNotIn(vs ...NewState) predicate.PlanDiff { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.PlanDiff(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldNewState), v...)) - }) + return predicate.PlanDiff(sql.FieldNotIn(FieldNewState, vs...)) } // HasPlanDiffToBuildCommit applies the HasEdge predicate on the "PlanDiffToBuildCommit" edge. @@ -228,7 +124,6 @@ func HasPlanDiffToBuildCommit() predicate.PlanDiff { return predicate.PlanDiff(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanDiffToBuildCommitTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, PlanDiffToBuildCommitTable, PlanDiffToBuildCommitColumn), ) sqlgraph.HasNeighbors(s, step) @@ -238,11 +133,7 @@ func HasPlanDiffToBuildCommit() predicate.PlanDiff { // HasPlanDiffToBuildCommitWith applies the HasEdge predicate on the "PlanDiffToBuildCommit" edge with a given conditions (other predicates). func HasPlanDiffToBuildCommitWith(preds ...predicate.BuildCommit) predicate.PlanDiff { return predicate.PlanDiff(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanDiffToBuildCommitInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, PlanDiffToBuildCommitTable, PlanDiffToBuildCommitColumn), - ) + step := newPlanDiffToBuildCommitStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -256,7 +147,6 @@ func HasPlanDiffToPlan() predicate.PlanDiff { return predicate.PlanDiff(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanDiffToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, PlanDiffToPlanTable, PlanDiffToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -266,11 +156,7 @@ func HasPlanDiffToPlan() predicate.PlanDiff { // HasPlanDiffToPlanWith applies the HasEdge predicate on the "PlanDiffToPlan" edge with a given conditions (other predicates). func HasPlanDiffToPlanWith(preds ...predicate.Plan) predicate.PlanDiff { return predicate.PlanDiff(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(PlanDiffToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, PlanDiffToPlanTable, PlanDiffToPlanColumn), - ) + step := newPlanDiffToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -281,32 +167,15 @@ func HasPlanDiffToPlanWith(preds ...predicate.Plan) predicate.PlanDiff { // And groups predicates with the AND operator between them. func And(predicates ...predicate.PlanDiff) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.PlanDiff(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.PlanDiff) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.PlanDiff(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.PlanDiff) predicate.PlanDiff { - return predicate.PlanDiff(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.PlanDiff(sql.NotPredicates(p)) } diff --git a/ent/plandiff_create.go b/ent/plandiff_create.go index 4442c1dc..6565f32b 100755 --- a/ent/plandiff_create.go +++ b/ent/plandiff_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. 
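For reference, the predicate and ordering helpers regenerated in ent/plandiff/where.go and ent/plandiff/plandiff.go above remain drop-in at query call sites. A minimal sketch, not part of the patch, assuming the generated github.com/gen0cide/laforge/ent client (client wiring is outside this diff):

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plandiff"
)

// listRecentDiffs exercises the rewritten helpers: RevisionGTE now compiles
// down to sql.FieldGTE, HasPlanDiffToPlan reuses newPlanDiffToPlanStep, and
// ByRevision is one of the new OrderOption constructors.
func listRecentDiffs(ctx context.Context, client *ent.Client, minRev int) ([]*ent.PlanDiff, error) {
	return client.PlanDiff.Query().
		Where(
			plandiff.RevisionGTE(minRev),
			plandiff.HasPlanDiffToPlan(),
		).
		Order(plandiff.ByRevision(sql.OrderDesc())).
		All(ctx)
}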
+// Code generated by ent, DO NOT EDIT. package ent @@ -77,44 +77,8 @@ func (pdc *PlanDiffCreate) Mutation() *PlanDiffMutation { // Save creates the PlanDiff in the database. func (pdc *PlanDiffCreate) Save(ctx context.Context) (*PlanDiff, error) { - var ( - err error - node *PlanDiff - ) pdc.defaults() - if len(pdc.hooks) == 0 { - if err = pdc.check(); err != nil { - return nil, err - } - node, err = pdc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanDiffMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = pdc.check(); err != nil { - return nil, err - } - pdc.mutation = mutation - if node, err = pdc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(pdc.hooks) - 1; i >= 0; i-- { - if pdc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pdc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pdc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, pdc.sqlSave, pdc.mutation, pdc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -170,10 +134,13 @@ func (pdc *PlanDiffCreate) check() error { } func (pdc *PlanDiffCreate) sqlSave(ctx context.Context) (*PlanDiff, error) { + if err := pdc.check(); err != nil { + return nil, err + } _node, _spec := pdc.createSpec() if err := sqlgraph.CreateNode(ctx, pdc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -184,38 +151,26 @@ func (pdc *PlanDiffCreate) sqlSave(ctx context.Context) (*PlanDiff, error) { return nil, err } } + pdc.mutation.id = &_node.ID + pdc.mutation.done = true return _node, nil } func (pdc *PlanDiffCreate) createSpec() (*PlanDiff, *sqlgraph.CreateSpec) { var ( _node = &PlanDiff{config: pdc.config} - _spec = &sqlgraph.CreateSpec{ - Table: plandiff.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(plandiff.Table, sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID)) ) if id, ok := pdc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := pdc.mutation.Revision(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plandiff.FieldRevision, - }) + _spec.SetField(plandiff.FieldRevision, field.TypeInt, value) _node.Revision = value } if value, ok := pdc.mutation.NewState(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: plandiff.FieldNewState, - }) + _spec.SetField(plandiff.FieldNewState, field.TypeEnum, value) _node.NewState = value } if nodes := pdc.mutation.PlanDiffToBuildCommitIDs(); len(nodes) > 0 { @@ -226,10 +181,7 @@ func (pdc *PlanDiffCreate) createSpec() (*PlanDiff, *sqlgraph.CreateSpec) { Columns: []string{plandiff.PlanDiffToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -246,10 +198,7 @@ func (pdc *PlanDiffCreate) createSpec() (*PlanDiff, *sqlgraph.CreateSpec) { Columns: []string{plandiff.PlanDiffToPlanColumn}, Bidi: false, Target: 
&sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -264,11 +213,15 @@ func (pdc *PlanDiffCreate) createSpec() (*PlanDiff, *sqlgraph.CreateSpec) { // PlanDiffCreateBulk is the builder for creating many PlanDiff entities in bulk. type PlanDiffCreateBulk struct { config + err error builders []*PlanDiffCreate } // Save creates the PlanDiff entities in the database. func (pdcb *PlanDiffCreateBulk) Save(ctx context.Context) ([]*PlanDiff, error) { + if pdcb.err != nil { + return nil, pdcb.err + } specs := make([]*sqlgraph.CreateSpec, len(pdcb.builders)) nodes := make([]*PlanDiff, len(pdcb.builders)) mutators := make([]Mutator, len(pdcb.builders)) @@ -285,8 +238,8 @@ func (pdcb *PlanDiffCreateBulk) Save(ctx context.Context) ([]*PlanDiff, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, pdcb.builders[i+1].mutation) } else { @@ -294,7 +247,7 @@ func (pdcb *PlanDiffCreateBulk) Save(ctx context.Context) ([]*PlanDiff, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, pdcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/plandiff_delete.go b/ent/plandiff_delete.go index 2ab19892..5d125094 100755 --- a/ent/plandiff_delete.go +++ b/ent/plandiff_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (pdd *PlanDiffDelete) Where(ps ...predicate.PlanDiff) *PlanDiffDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (pdd *PlanDiffDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(pdd.hooks) == 0 { - affected, err = pdd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanDiffMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - pdd.mutation = mutation - affected, err = pdd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(pdd.hooks) - 1; i >= 0; i-- { - if pdd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pdd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pdd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, pdd.sqlExec, pdd.mutation, pdd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
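As a usage note for the plandiff_delete.go changes above: Exec now runs through withHooks and wraps constraint violations in *ConstraintError. An illustrative sketch only; the Delete() accessor on the generated client is assumed, as it is not shown in this hunk:

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/plandiff"
)

// pruneOldDiffs deletes every PlanDiff below a revision threshold and relies
// on the wrapped error type so callers can detect foreign-key conflicts.
func pruneOldDiffs(ctx context.Context, client *ent.Client, keepFrom int) (int, error) {
	n, err := client.PlanDiff.Delete().
		Where(plandiff.RevisionLT(keepFrom)).
		Exec(ctx)
	if ent.IsConstraintError(err) {
		// Another row still references one of the deleted PlanDiffs.
		return 0, err
	}
	return n, err
}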
@@ -68,15 +40,7 @@ func (pdd *PlanDiffDelete) ExecX(ctx context.Context) int { } func (pdd *PlanDiffDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: plandiff.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(plandiff.Table, sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID)) if ps := pdd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (pdd *PlanDiffDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, pdd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, pdd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pdd.mutation.done = true + return affected, err } // PlanDiffDeleteOne is the builder for deleting a single PlanDiff entity. @@ -92,6 +61,12 @@ type PlanDiffDeleteOne struct { pdd *PlanDiffDelete } +// Where appends a list predicates to the PlanDiffDelete builder. +func (pddo *PlanDiffDeleteOne) Where(ps ...predicate.PlanDiff) *PlanDiffDeleteOne { + pddo.pdd.mutation.Where(ps...) + return pddo +} + // Exec executes the deletion query. func (pddo *PlanDiffDeleteOne) Exec(ctx context.Context) error { n, err := pddo.pdd.Exec(ctx) @@ -107,5 +82,7 @@ func (pddo *PlanDiffDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (pddo *PlanDiffDeleteOne) ExecX(ctx context.Context) { - pddo.pdd.ExecX(ctx) + if err := pddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/plandiff_query.go b/ent/plandiff_query.go index 6e87fac3..9f1a9ec1 100755 --- a/ent/plandiff_query.go +++ b/ent/plandiff_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -21,16 +20,15 @@ import ( // PlanDiffQuery is the builder for querying PlanDiff entities. type PlanDiffQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.PlanDiff - // eager-loading edges. + ctx *QueryContext + order []plandiff.OrderOption + inters []Interceptor + predicates []predicate.PlanDiff withPlanDiffToBuildCommit *BuildCommitQuery withPlanDiffToPlan *PlanQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*PlanDiff) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -42,34 +40,34 @@ func (pdq *PlanDiffQuery) Where(ps ...predicate.PlanDiff) *PlanDiffQuery { return pdq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (pdq *PlanDiffQuery) Limit(limit int) *PlanDiffQuery { - pdq.limit = &limit + pdq.ctx.Limit = &limit return pdq } -// Offset adds an offset step to the query. +// Offset to start from. func (pdq *PlanDiffQuery) Offset(offset int) *PlanDiffQuery { - pdq.offset = &offset + pdq.ctx.Offset = &offset return pdq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (pdq *PlanDiffQuery) Unique(unique bool) *PlanDiffQuery { - pdq.unique = &unique + pdq.ctx.Unique = &unique return pdq } -// Order adds an order step to the query. 
-func (pdq *PlanDiffQuery) Order(o ...OrderFunc) *PlanDiffQuery { +// Order specifies how the records should be ordered. +func (pdq *PlanDiffQuery) Order(o ...plandiff.OrderOption) *PlanDiffQuery { pdq.order = append(pdq.order, o...) return pdq } // QueryPlanDiffToBuildCommit chains the current query on the "PlanDiffToBuildCommit" edge. func (pdq *PlanDiffQuery) QueryPlanDiffToBuildCommit() *BuildCommitQuery { - query := &BuildCommitQuery{config: pdq.config} + query := (&BuildCommitClient{config: pdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pdq.prepareQuery(ctx); err != nil { return nil, err @@ -91,7 +89,7 @@ func (pdq *PlanDiffQuery) QueryPlanDiffToBuildCommit() *BuildCommitQuery { // QueryPlanDiffToPlan chains the current query on the "PlanDiffToPlan" edge. func (pdq *PlanDiffQuery) QueryPlanDiffToPlan() *PlanQuery { - query := &PlanQuery{config: pdq.config} + query := (&PlanClient{config: pdq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pdq.prepareQuery(ctx); err != nil { return nil, err @@ -114,7 +112,7 @@ func (pdq *PlanDiffQuery) QueryPlanDiffToPlan() *PlanQuery { // First returns the first PlanDiff entity from the query. // Returns a *NotFoundError when no PlanDiff was found. func (pdq *PlanDiffQuery) First(ctx context.Context) (*PlanDiff, error) { - nodes, err := pdq.Limit(1).All(ctx) + nodes, err := pdq.Limit(1).All(setContextOp(ctx, pdq.ctx, "First")) if err != nil { return nil, err } @@ -137,7 +135,7 @@ func (pdq *PlanDiffQuery) FirstX(ctx context.Context) *PlanDiff { // Returns a *NotFoundError when no PlanDiff ID was found. func (pdq *PlanDiffQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pdq.Limit(1).IDs(ctx); err != nil { + if ids, err = pdq.Limit(1).IDs(setContextOp(ctx, pdq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -160,7 +158,7 @@ func (pdq *PlanDiffQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one PlanDiff entity is found. // Returns a *NotFoundError when no PlanDiff entities are found. func (pdq *PlanDiffQuery) Only(ctx context.Context) (*PlanDiff, error) { - nodes, err := pdq.Limit(2).All(ctx) + nodes, err := pdq.Limit(2).All(setContextOp(ctx, pdq.ctx, "Only")) if err != nil { return nil, err } @@ -188,7 +186,7 @@ func (pdq *PlanDiffQuery) OnlyX(ctx context.Context) *PlanDiff { // Returns a *NotFoundError when no entities are found. func (pdq *PlanDiffQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pdq.Limit(2).IDs(ctx); err != nil { + if ids, err = pdq.Limit(2).IDs(setContextOp(ctx, pdq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -213,10 +211,12 @@ func (pdq *PlanDiffQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of PlanDiffs. func (pdq *PlanDiffQuery) All(ctx context.Context) ([]*PlanDiff, error) { + ctx = setContextOp(ctx, pdq.ctx, "All") if err := pdq.prepareQuery(ctx); err != nil { return nil, err } - return pdq.sqlAll(ctx) + qr := querierAll[[]*PlanDiff, *PlanDiffQuery]() + return withInterceptors[[]*PlanDiff](ctx, pdq, qr, pdq.inters) } // AllX is like All, but panics if an error occurs. @@ -229,9 +229,12 @@ func (pdq *PlanDiffQuery) AllX(ctx context.Context) []*PlanDiff { } // IDs executes the query and returns a list of PlanDiff IDs. 
-func (pdq *PlanDiffQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := pdq.Select(plandiff.FieldID).Scan(ctx, &ids); err != nil { +func (pdq *PlanDiffQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if pdq.ctx.Unique == nil && pdq.path != nil { + pdq.Unique(true) + } + ctx = setContextOp(ctx, pdq.ctx, "IDs") + if err = pdq.Select(plandiff.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -248,10 +251,11 @@ func (pdq *PlanDiffQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (pdq *PlanDiffQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pdq.ctx, "Count") if err := pdq.prepareQuery(ctx); err != nil { return 0, err } - return pdq.sqlCount(ctx) + return withInterceptors[int](ctx, pdq, querierCount[*PlanDiffQuery](), pdq.inters) } // CountX is like Count, but panics if an error occurs. @@ -265,10 +269,15 @@ func (pdq *PlanDiffQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (pdq *PlanDiffQuery) Exist(ctx context.Context) (bool, error) { - if err := pdq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, pdq.ctx, "Exist") + switch _, err := pdq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return pdq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -288,23 +297,22 @@ func (pdq *PlanDiffQuery) Clone() *PlanDiffQuery { } return &PlanDiffQuery{ config: pdq.config, - limit: pdq.limit, - offset: pdq.offset, - order: append([]OrderFunc{}, pdq.order...), + ctx: pdq.ctx.Clone(), + order: append([]plandiff.OrderOption{}, pdq.order...), + inters: append([]Interceptor{}, pdq.inters...), predicates: append([]predicate.PlanDiff{}, pdq.predicates...), withPlanDiffToBuildCommit: pdq.withPlanDiffToBuildCommit.Clone(), withPlanDiffToPlan: pdq.withPlanDiffToPlan.Clone(), // clone intermediate query. - sql: pdq.sql.Clone(), - path: pdq.path, - unique: pdq.unique, + sql: pdq.sql.Clone(), + path: pdq.path, } } // WithPlanDiffToBuildCommit tells the query-builder to eager-load the nodes that are connected to // the "PlanDiffToBuildCommit" edge. The optional arguments are used to configure the query builder of the edge. func (pdq *PlanDiffQuery) WithPlanDiffToBuildCommit(opts ...func(*BuildCommitQuery)) *PlanDiffQuery { - query := &BuildCommitQuery{config: pdq.config} + query := (&BuildCommitClient{config: pdq.config}).Query() for _, opt := range opts { opt(query) } @@ -315,7 +323,7 @@ func (pdq *PlanDiffQuery) WithPlanDiffToBuildCommit(opts ...func(*BuildCommitQue // WithPlanDiffToPlan tells the query-builder to eager-load the nodes that are connected to // the "PlanDiffToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (pdq *PlanDiffQuery) WithPlanDiffToPlan(opts ...func(*PlanQuery)) *PlanDiffQuery { - query := &PlanQuery{config: pdq.config} + query := (&PlanClient{config: pdq.config}).Query() for _, opt := range opts { opt(query) } @@ -337,17 +345,13 @@ func (pdq *PlanDiffQuery) WithPlanDiffToPlan(opts ...func(*PlanQuery)) *PlanDiff // GroupBy(plandiff.FieldRevision). // Aggregate(ent.Count()). 
// Scan(ctx, &v) -// func (pdq *PlanDiffQuery) GroupBy(field string, fields ...string) *PlanDiffGroupBy { - group := &PlanDiffGroupBy{config: pdq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := pdq.prepareQuery(ctx); err != nil { - return nil, err - } - return pdq.sqlQuery(ctx), nil - } - return group + pdq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PlanDiffGroupBy{build: pdq} + grbuild.flds = &pdq.ctx.Fields + grbuild.label = plandiff.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -362,14 +366,31 @@ func (pdq *PlanDiffQuery) GroupBy(field string, fields ...string) *PlanDiffGroup // client.PlanDiff.Query(). // Select(plandiff.FieldRevision). // Scan(ctx, &v) -// func (pdq *PlanDiffQuery) Select(fields ...string) *PlanDiffSelect { - pdq.fields = append(pdq.fields, fields...) - return &PlanDiffSelect{PlanDiffQuery: pdq} + pdq.ctx.Fields = append(pdq.ctx.Fields, fields...) + sbuild := &PlanDiffSelect{PlanDiffQuery: pdq} + sbuild.label = plandiff.Label + sbuild.flds, sbuild.scan = &pdq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PlanDiffSelect configured with the given aggregations. +func (pdq *PlanDiffQuery) Aggregate(fns ...AggregateFunc) *PlanDiffSelect { + return pdq.Select().Aggregate(fns...) } func (pdq *PlanDiffQuery) prepareQuery(ctx context.Context) error { - for _, f := range pdq.fields { + for _, inter := range pdq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pdq); err != nil { + return err + } + } + } + for _, f := range pdq.ctx.Fields { if !plandiff.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -384,7 +405,7 @@ func (pdq *PlanDiffQuery) prepareQuery(ctx context.Context) error { return nil } -func (pdq *PlanDiffQuery) sqlAll(ctx context.Context) ([]*PlanDiff, error) { +func (pdq *PlanDiffQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PlanDiff, error) { var ( nodes = []*PlanDiff{} withFKs = pdq.withFKs @@ -400,121 +421,133 @@ func (pdq *PlanDiffQuery) sqlAll(ctx context.Context) ([]*PlanDiff, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, plandiff.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PlanDiff).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &PlanDiff{config: pdq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(pdq.modifiers) > 0 { + _spec.Modifiers = pdq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, pdq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := pdq.withPlanDiffToBuildCommit; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*PlanDiff) - for i := range nodes { - if nodes[i].plan_diff_plan_diff_to_build_commit == nil { - continue - } - fk := *nodes[i].plan_diff_plan_diff_to_build_commit - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := pdq.loadPlanDiffToBuildCommit(ctx, query, nodes, nil, + func(n *PlanDiff, e *BuildCommit) { n.Edges.PlanDiffToBuildCommit = e }); err != nil { + return nil, err } - query.Where(buildcommit.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := pdq.withPlanDiffToPlan; query != nil { + if err := pdq.loadPlanDiffToPlan(ctx, query, nodes, nil, + func(n *PlanDiff, e *Plan) { n.Edges.PlanDiffToPlan = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_diff_plan_diff_to_build_commit" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.PlanDiffToBuildCommit = n - } + } + for i := range pdq.loadTotal { + if err := pdq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := pdq.withPlanDiffToPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*PlanDiff) +func (pdq *PlanDiffQuery) loadPlanDiffToBuildCommit(ctx context.Context, query *BuildCommitQuery, nodes []*PlanDiff, init func(*PlanDiff), assign func(*PlanDiff, *BuildCommit)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*PlanDiff) + for i := range nodes { + if nodes[i].plan_diff_plan_diff_to_build_commit == nil { + continue + } + fk := *nodes[i].plan_diff_plan_diff_to_build_commit + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(buildcommit.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_diff_plan_diff_to_build_commit" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].plan_diff_plan_diff_to_plan == nil { - continue - } - fk := *nodes[i].plan_diff_plan_diff_to_plan - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (pdq 
*PlanDiffQuery) loadPlanDiffToPlan(ctx context.Context, query *PlanQuery, nodes []*PlanDiff, init func(*PlanDiff), assign func(*PlanDiff, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*PlanDiff) + for i := range nodes { + if nodes[i].plan_diff_plan_diff_to_plan == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_diff_plan_diff_to_plan" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.PlanDiffToPlan = n - } + fk := *nodes[i].plan_diff_plan_diff_to_plan + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - return nodes, nil + if len(ids) == 0 { + return nil + } + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_diff_plan_diff_to_plan" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (pdq *PlanDiffQuery) sqlCount(ctx context.Context) (int, error) { _spec := pdq.querySpec() - _spec.Node.Columns = pdq.fields - if len(pdq.fields) > 0 { - _spec.Unique = pdq.unique != nil && *pdq.unique + if len(pdq.modifiers) > 0 { + _spec.Modifiers = pdq.modifiers } - return sqlgraph.CountNodes(ctx, pdq.driver, _spec) -} - -func (pdq *PlanDiffQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := pdq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = pdq.ctx.Fields + if len(pdq.ctx.Fields) > 0 { + _spec.Unique = pdq.ctx.Unique != nil && *pdq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, pdq.driver, _spec) } func (pdq *PlanDiffQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: plandiff.Table, - Columns: plandiff.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, - }, - From: pdq.sql, - Unique: true, - } - if unique := pdq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(plandiff.Table, plandiff.Columns, sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID)) + _spec.From = pdq.sql + if unique := pdq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if pdq.path != nil { + _spec.Unique = true } - if fields := pdq.fields; len(fields) > 0 { + if fields := pdq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, plandiff.FieldID) for i := range fields { @@ -530,10 +563,10 @@ func (pdq *PlanDiffQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := pdq.limit; limit != nil { + if limit := pdq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := pdq.offset; offset != nil { + if offset := pdq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := pdq.order; len(ps) > 0 { @@ -549,7 +582,7 @@ func (pdq *PlanDiffQuery) querySpec() *sqlgraph.QuerySpec { func (pdq *PlanDiffQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(pdq.driver.Dialect()) t1 := builder.Table(plandiff.Table) - columns := pdq.fields + columns := pdq.ctx.Fields if len(columns) == 0 { columns = plandiff.Columns } @@ -558,7 +591,7 @@ func (pdq *PlanDiffQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = pdq.sql selector.Select(selector.Columns(columns...)...) 
} - if pdq.unique != nil && *pdq.unique { + if pdq.ctx.Unique != nil && *pdq.ctx.Unique { selector.Distinct() } for _, p := range pdq.predicates { @@ -567,12 +600,12 @@ func (pdq *PlanDiffQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range pdq.order { p(selector) } - if offset := pdq.offset; offset != nil { + if offset := pdq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := pdq.limit; limit != nil { + if limit := pdq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -580,12 +613,8 @@ func (pdq *PlanDiffQuery) sqlQuery(ctx context.Context) *sql.Selector { // PlanDiffGroupBy is the group-by builder for PlanDiff entities. type PlanDiffGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *PlanDiffQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -594,471 +623,77 @@ func (pdgb *PlanDiffGroupBy) Aggregate(fns ...AggregateFunc) *PlanDiffGroupBy { return pdgb } -// Scan applies the group-by query and scans the result into the given value. -func (pdgb *PlanDiffGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := pdgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (pdgb *PlanDiffGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pdgb.build.ctx, "GroupBy") + if err := pdgb.build.prepareQuery(ctx); err != nil { return err } - pdgb.sql = query - return pdgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := pdgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(pdgb.fields) > 1 { - return nil, errors.New("ent: PlanDiffGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := pdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) StringsX(ctx context.Context) []string { - v, err := pdgb.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*PlanDiffQuery, *PlanDiffGroupBy](ctx, pdgb.build, pdgb, pdgb.build.inters, v) } -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = pdgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) StringX(ctx context.Context) string { - v, err := pdgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. 
-// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(pdgb.fields) > 1 { - return nil, errors.New("ent: PlanDiffGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := pdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) IntsX(ctx context.Context) []int { - v, err := pdgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = pdgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) IntX(ctx context.Context) int { - v, err := pdgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(pdgb.fields) > 1 { - return nil, errors.New("ent: PlanDiffGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := pdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := pdgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = pdgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) Float64X(ctx context.Context) float64 { - v, err := pdgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (pdgb *PlanDiffGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(pdgb.fields) > 1 { - return nil, errors.New("ent: PlanDiffGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := pdgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) BoolsX(ctx context.Context) []bool { - v, err := pdgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (pdgb *PlanDiffGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = pdgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (pdgb *PlanDiffGroupBy) BoolX(ctx context.Context) bool { - v, err := pdgb.Bool(ctx) - if err != nil { - panic(err) +func (pdgb *PlanDiffGroupBy) sqlScan(ctx context.Context, root *PlanDiffQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pdgb.fns)) + for _, fn := range pdgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (pdgb *PlanDiffGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range pdgb.fields { - if !plandiff.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pdgb.flds)+len(pdgb.fns)) + for _, f := range *pdgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := pdgb.sqlQuery() + selector.GroupBy(selector.Columns(*pdgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := pdgb.driver.Query(ctx, query, args, rows); err != nil { + if err := pdgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (pdgb *PlanDiffGroupBy) sqlQuery() *sql.Selector { - selector := pdgb.sql.Select() - aggregation := make([]string, 0, len(pdgb.fns)) - for _, fn := range pdgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(pdgb.fields)+len(pdgb.fns)) - for _, f := range pdgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(pdgb.fields...)...) -} - // PlanDiffSelect is the builder for selecting fields of PlanDiff entities. type PlanDiffSelect struct { *PlanDiffQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (pds *PlanDiffSelect) Aggregate(fns ...AggregateFunc) *PlanDiffSelect { + pds.fns = append(pds.fns, fns...) + return pds } // Scan applies the selector query and scans the result into the given value. -func (pds *PlanDiffSelect) Scan(ctx context.Context, v interface{}) error { +func (pds *PlanDiffSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pds.ctx, "Select") if err := pds.prepareQuery(ctx); err != nil { return err } - pds.sql = pds.PlanDiffQuery.sqlQuery(ctx) - return pds.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (pds *PlanDiffSelect) ScanX(ctx context.Context, v interface{}) { - if err := pds.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. 
-func (pds *PlanDiffSelect) Strings(ctx context.Context) ([]string, error) { - if len(pds.fields) > 1 { - return nil, errors.New("ent: PlanDiffSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := pds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*PlanDiffQuery, *PlanDiffSelect](ctx, pds.PlanDiffQuery, pds, pds.inters, v) } -// StringsX is like Strings, but panics if an error occurs. -func (pds *PlanDiffSelect) StringsX(ctx context.Context) []string { - v, err := pds.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (pds *PlanDiffSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = pds.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (pds *PlanDiffSelect) StringX(ctx context.Context) string { - v, err := pds.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (pds *PlanDiffSelect) Ints(ctx context.Context) ([]int, error) { - if len(pds.fields) > 1 { - return nil, errors.New("ent: PlanDiffSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := pds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (pds *PlanDiffSelect) IntsX(ctx context.Context) []int { - v, err := pds.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (pds *PlanDiffSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = pds.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (pds *PlanDiffSelect) IntX(ctx context.Context) int { - v, err := pds.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (pds *PlanDiffSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(pds.fields) > 1 { - return nil, errors.New("ent: PlanDiffSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := pds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (pds *PlanDiffSelect) Float64sX(ctx context.Context) []float64 { - v, err := pds.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (pds *PlanDiffSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = pds.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (pds *PlanDiffSelect) Float64X(ctx context.Context) float64 { - v, err := pds.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (pds *PlanDiffSelect) Bools(ctx context.Context) ([]bool, error) { - if len(pds.fields) > 1 { - return nil, errors.New("ent: PlanDiffSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := pds.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (pds *PlanDiffSelect) BoolsX(ctx context.Context) []bool { - v, err := pds.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (pds *PlanDiffSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = pds.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{plandiff.Label} - default: - err = fmt.Errorf("ent: PlanDiffSelect.Bools returned %d results when one was expected", len(v)) +func (pds *PlanDiffSelect) sqlScan(ctx context.Context, root *PlanDiffQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pds.fns)) + for _, fn := range pds.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (pds *PlanDiffSelect) BoolX(ctx context.Context) bool { - v, err := pds.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*pds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (pds *PlanDiffSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := pds.sql.Query() + query, args := selector.Query() if err := pds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/plandiff_update.go b/ent/plandiff_update.go index 12e90008..e693176f 100755 --- a/ent/plandiff_update.go +++ b/ent/plandiff_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -37,6 +37,14 @@ func (pdu *PlanDiffUpdate) SetRevision(i int) *PlanDiffUpdate { return pdu } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (pdu *PlanDiffUpdate) SetNillableRevision(i *int) *PlanDiffUpdate { + if i != nil { + pdu.SetRevision(*i) + } + return pdu +} + // AddRevision adds i to the "revision" field. func (pdu *PlanDiffUpdate) AddRevision(i int) *PlanDiffUpdate { pdu.mutation.AddRevision(i) @@ -49,6 +57,14 @@ func (pdu *PlanDiffUpdate) SetNewState(ps plandiff.NewState) *PlanDiffUpdate { return pdu } +// SetNillableNewState sets the "new_state" field if the given value is not nil. 
+func (pdu *PlanDiffUpdate) SetNillableNewState(ps *plandiff.NewState) *PlanDiffUpdate { + if ps != nil { + pdu.SetNewState(*ps) + } + return pdu +} + // SetPlanDiffToBuildCommitID sets the "PlanDiffToBuildCommit" edge to the BuildCommit entity by ID. func (pdu *PlanDiffUpdate) SetPlanDiffToBuildCommitID(id uuid.UUID) *PlanDiffUpdate { pdu.mutation.SetPlanDiffToBuildCommitID(id) @@ -90,40 +106,7 @@ func (pdu *PlanDiffUpdate) ClearPlanDiffToPlan() *PlanDiffUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (pdu *PlanDiffUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(pdu.hooks) == 0 { - if err = pdu.check(); err != nil { - return 0, err - } - affected, err = pdu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanDiffMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = pdu.check(); err != nil { - return 0, err - } - pdu.mutation = mutation - affected, err = pdu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(pdu.hooks) - 1; i >= 0; i-- { - if pdu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pdu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pdu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, pdu.sqlSave, pdu.mutation, pdu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -165,16 +148,10 @@ func (pdu *PlanDiffUpdate) check() error { } func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: plandiff.Table, - Columns: plandiff.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, - }, + if err := pdu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(plandiff.Table, plandiff.Columns, sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID)) if ps := pdu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -183,25 +160,13 @@ func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := pdu.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plandiff.FieldRevision, - }) + _spec.SetField(plandiff.FieldRevision, field.TypeInt, value) } if value, ok := pdu.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plandiff.FieldRevision, - }) + _spec.AddField(plandiff.FieldRevision, field.TypeInt, value) } if value, ok := pdu.mutation.NewState(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: plandiff.FieldNewState, - }) + _spec.SetField(plandiff.FieldNewState, field.TypeEnum, value) } if pdu.mutation.PlanDiffToBuildCommitCleared() { edge := &sqlgraph.EdgeSpec{ @@ -211,10 +176,7 @@ func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plandiff.PlanDiffToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } 
_spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -227,10 +189,7 @@ func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plandiff.PlanDiffToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -246,10 +205,7 @@ func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plandiff.PlanDiffToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -262,10 +218,7 @@ func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{plandiff.PlanDiffToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -277,10 +230,11 @@ func (pdu *PlanDiffUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{plandiff.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + pdu.mutation.done = true return n, nil } @@ -299,6 +253,14 @@ func (pduo *PlanDiffUpdateOne) SetRevision(i int) *PlanDiffUpdateOne { return pduo } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (pduo *PlanDiffUpdateOne) SetNillableRevision(i *int) *PlanDiffUpdateOne { + if i != nil { + pduo.SetRevision(*i) + } + return pduo +} + // AddRevision adds i to the "revision" field. func (pduo *PlanDiffUpdateOne) AddRevision(i int) *PlanDiffUpdateOne { pduo.mutation.AddRevision(i) @@ -311,6 +273,14 @@ func (pduo *PlanDiffUpdateOne) SetNewState(ps plandiff.NewState) *PlanDiffUpdate return pduo } +// SetNillableNewState sets the "new_state" field if the given value is not nil. +func (pduo *PlanDiffUpdateOne) SetNillableNewState(ps *plandiff.NewState) *PlanDiffUpdateOne { + if ps != nil { + pduo.SetNewState(*ps) + } + return pduo +} + // SetPlanDiffToBuildCommitID sets the "PlanDiffToBuildCommit" edge to the BuildCommit entity by ID. func (pduo *PlanDiffUpdateOne) SetPlanDiffToBuildCommitID(id uuid.UUID) *PlanDiffUpdateOne { pduo.mutation.SetPlanDiffToBuildCommitID(id) @@ -350,6 +320,12 @@ func (pduo *PlanDiffUpdateOne) ClearPlanDiffToPlan() *PlanDiffUpdateOne { return pduo } +// Where appends a list predicates to the PlanDiffUpdate builder. +func (pduo *PlanDiffUpdateOne) Where(ps ...predicate.PlanDiff) *PlanDiffUpdateOne { + pduo.mutation.Where(ps...) + return pduo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (pduo *PlanDiffUpdateOne) Select(field string, fields ...string) *PlanDiffUpdateOne { @@ -359,40 +335,7 @@ func (pduo *PlanDiffUpdateOne) Select(field string, fields ...string) *PlanDiffU // Save executes the query and returns the updated PlanDiff entity. 
func (pduo *PlanDiffUpdateOne) Save(ctx context.Context) (*PlanDiff, error) { - var ( - err error - node *PlanDiff - ) - if len(pduo.hooks) == 0 { - if err = pduo.check(); err != nil { - return nil, err - } - node, err = pduo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PlanDiffMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = pduo.check(); err != nil { - return nil, err - } - pduo.mutation = mutation - node, err = pduo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(pduo.hooks) - 1; i >= 0; i-- { - if pduo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pduo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pduo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, pduo.sqlSave, pduo.mutation, pduo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -434,16 +377,10 @@ func (pduo *PlanDiffUpdateOne) check() error { } func (pduo *PlanDiffUpdateOne) sqlSave(ctx context.Context) (_node *PlanDiff, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: plandiff.Table, - Columns: plandiff.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plandiff.FieldID, - }, - }, + if err := pduo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(plandiff.Table, plandiff.Columns, sqlgraph.NewFieldSpec(plandiff.FieldID, field.TypeUUID)) id, ok := pduo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PlanDiff.id" for update`)} @@ -469,25 +406,13 @@ func (pduo *PlanDiffUpdateOne) sqlSave(ctx context.Context) (_node *PlanDiff, er } } if value, ok := pduo.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plandiff.FieldRevision, - }) + _spec.SetField(plandiff.FieldRevision, field.TypeInt, value) } if value, ok := pduo.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: plandiff.FieldRevision, - }) + _spec.AddField(plandiff.FieldRevision, field.TypeInt, value) } if value, ok := pduo.mutation.NewState(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: plandiff.FieldNewState, - }) + _spec.SetField(plandiff.FieldNewState, field.TypeEnum, value) } if pduo.mutation.PlanDiffToBuildCommitCleared() { edge := &sqlgraph.EdgeSpec{ @@ -497,10 +422,7 @@ func (pduo *PlanDiffUpdateOne) sqlSave(ctx context.Context) (_node *PlanDiff, er Columns: []string{plandiff.PlanDiffToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -513,10 +435,7 @@ func (pduo *PlanDiffUpdateOne) sqlSave(ctx context.Context) (_node *PlanDiff, er Columns: []string{plandiff.PlanDiffToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -532,10 +451,7 @@ func (pduo *PlanDiffUpdateOne) 
sqlSave(ctx context.Context) (_node *PlanDiff, er Columns: []string{plandiff.PlanDiffToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -548,10 +464,7 @@ func (pduo *PlanDiffUpdateOne) sqlSave(ctx context.Context) (_node *PlanDiff, er Columns: []string{plandiff.PlanDiffToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -566,9 +479,10 @@ func (pduo *PlanDiffUpdateOne) sqlSave(ctx context.Context) (_node *PlanDiff, er if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{plandiff.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + pduo.mutation.done = true return _node, nil } diff --git a/ent/predicate/predicate.go b/ent/predicate/predicate.go index e11cfcae..8e2a8f24 100755 --- a/ent/predicate/predicate.go +++ b/ent/predicate/predicate.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package predicate diff --git a/ent/provisionedhost.go b/ent/provisionedhost.go index af3c4a70..794a000b 100755 --- a/ent/provisionedhost.go +++ b/ent/provisionedhost.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/build" "github.com/gen0cide/laforge/ent/ginfilemiddleware" @@ -33,6 +34,7 @@ type ProvisionedHost struct { // The values are being populated by the ProvisionedHostQuery when eager-loading is set. Edges ProvisionedHostEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // ProvisionedHostToStatus holds the value of the ProvisionedHostToStatus edge. HCLProvisionedHostToStatus *Status `json:"ProvisionedHostToStatus,omitempty"` @@ -54,13 +56,14 @@ type ProvisionedHost struct { HCLProvisionedHostToPlan *Plan `json:"ProvisionedHostToPlan,omitempty"` // ProvisionedHostToGinFileMiddleware holds the value of the ProvisionedHostToGinFileMiddleware edge. HCLProvisionedHostToGinFileMiddleware *GinFileMiddleware `json:"ProvisionedHostToGinFileMiddleware,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ gin_file_middleware_gin_file_middleware_to_provisioned_host *uuid.UUID plan_plan_to_provisioned_host *uuid.UUID provisioned_host_provisioned_host_to_provisioned_network *uuid.UUID provisioned_host_provisioned_host_to_host *uuid.UUID provisioned_host_provisioned_host_to_end_step_plan *uuid.UUID provisioned_host_provisioned_host_to_build *uuid.UUID + selectValues sql.SelectValues } // ProvisionedHostEdges holds the relations/edges for other nodes in the graph. @@ -88,6 +91,12 @@ type ProvisionedHostEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [10]bool + // totalCount holds the count of the edges above. 
+ totalCount [10]map[string]int + + namedProvisionedHostToProvisioningStep map[string][]*ProvisioningStep + namedProvisionedHostToAgentStatus map[string][]*AgentStatus + namedProvisionedHostToAgentTask map[string][]*AgentTask } // ProvisionedHostToStatusOrErr returns the ProvisionedHostToStatus value or an error if the edge @@ -95,8 +104,7 @@ type ProvisionedHostEdges struct { func (e ProvisionedHostEdges) ProvisionedHostToStatusOrErr() (*Status, error) { if e.loadedTypes[0] { if e.ProvisionedHostToStatus == nil { - // The edge ProvisionedHostToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.ProvisionedHostToStatus, nil @@ -109,8 +117,7 @@ func (e ProvisionedHostEdges) ProvisionedHostToStatusOrErr() (*Status, error) { func (e ProvisionedHostEdges) ProvisionedHostToProvisionedNetworkOrErr() (*ProvisionedNetwork, error) { if e.loadedTypes[1] { if e.ProvisionedHostToProvisionedNetwork == nil { - // The edge ProvisionedHostToProvisionedNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionednetwork.Label} } return e.ProvisionedHostToProvisionedNetwork, nil @@ -123,8 +130,7 @@ func (e ProvisionedHostEdges) ProvisionedHostToProvisionedNetworkOrErr() (*Provi func (e ProvisionedHostEdges) ProvisionedHostToHostOrErr() (*Host, error) { if e.loadedTypes[2] { if e.ProvisionedHostToHost == nil { - // The edge ProvisionedHostToHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: host.Label} } return e.ProvisionedHostToHost, nil @@ -137,8 +143,7 @@ func (e ProvisionedHostEdges) ProvisionedHostToHostOrErr() (*Host, error) { func (e ProvisionedHostEdges) ProvisionedHostToEndStepPlanOrErr() (*Plan, error) { if e.loadedTypes[3] { if e.ProvisionedHostToEndStepPlan == nil { - // The edge ProvisionedHostToEndStepPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.ProvisionedHostToEndStepPlan, nil @@ -151,8 +156,7 @@ func (e ProvisionedHostEdges) ProvisionedHostToEndStepPlanOrErr() (*Plan, error) func (e ProvisionedHostEdges) ProvisionedHostToBuildOrErr() (*Build, error) { if e.loadedTypes[4] { if e.ProvisionedHostToBuild == nil { - // The edge ProvisionedHostToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.ProvisionedHostToBuild, nil @@ -192,8 +196,7 @@ func (e ProvisionedHostEdges) ProvisionedHostToAgentTaskOrErr() ([]*AgentTask, e func (e ProvisionedHostEdges) ProvisionedHostToPlanOrErr() (*Plan, error) { if e.loadedTypes[8] { if e.ProvisionedHostToPlan == nil { - // The edge ProvisionedHostToPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.ProvisionedHostToPlan, nil @@ -206,8 +209,7 @@ func (e ProvisionedHostEdges) ProvisionedHostToPlanOrErr() (*Plan, error) { func (e ProvisionedHostEdges) ProvisionedHostToGinFileMiddlewareOrErr() (*GinFileMiddleware, error) { if e.loadedTypes[9] { if e.ProvisionedHostToGinFileMiddleware == nil { - // The edge ProvisionedHostToGinFileMiddleware was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: ginfilemiddleware.Label} } return e.ProvisionedHostToGinFileMiddleware, nil @@ -216,8 +218,8 @@ func (e ProvisionedHostEdges) ProvisionedHostToGinFileMiddlewareOrErr() (*GinFil } // scanValues returns the types for scanning values from sql.Rows. -func (*ProvisionedHost) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*ProvisionedHost) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case provisionedhost.FieldVars: @@ -239,7 +241,7 @@ func (*ProvisionedHost) scanValues(columns []string) ([]interface{}, error) { case provisionedhost.ForeignKeys[5]: // provisioned_host_provisioned_host_to_build values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type ProvisionedHost", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -247,7 +249,7 @@ func (*ProvisionedHost) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the ProvisionedHost fields. -func (ph *ProvisionedHost) assignValues(columns []string, values []interface{}) error { +func (ph *ProvisionedHost) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -322,76 +324,84 @@ func (ph *ProvisionedHost) assignValues(columns []string, values []interface{}) ph.provisioned_host_provisioned_host_to_build = new(uuid.UUID) *ph.provisioned_host_provisioned_host_to_build = *value.S.(*uuid.UUID) } + default: + ph.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the ProvisionedHost. +// This includes values selected through modifiers, order, etc. +func (ph *ProvisionedHost) Value(name string) (ent.Value, error) { + return ph.selectValues.Get(name) +} + // QueryProvisionedHostToStatus queries the "ProvisionedHostToStatus" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToStatus() *StatusQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToStatus(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToStatus(ph) } // QueryProvisionedHostToProvisionedNetwork queries the "ProvisionedHostToProvisionedNetwork" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToProvisionedNetwork() *ProvisionedNetworkQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToProvisionedNetwork(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToProvisionedNetwork(ph) } // QueryProvisionedHostToHost queries the "ProvisionedHostToHost" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToHost() *HostQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToHost(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToHost(ph) } // QueryProvisionedHostToEndStepPlan queries the "ProvisionedHostToEndStepPlan" edge of the ProvisionedHost entity. 
func (ph *ProvisionedHost) QueryProvisionedHostToEndStepPlan() *PlanQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToEndStepPlan(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToEndStepPlan(ph) } // QueryProvisionedHostToBuild queries the "ProvisionedHostToBuild" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToBuild() *BuildQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToBuild(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToBuild(ph) } // QueryProvisionedHostToProvisioningStep queries the "ProvisionedHostToProvisioningStep" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToProvisioningStep() *ProvisioningStepQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToProvisioningStep(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToProvisioningStep(ph) } // QueryProvisionedHostToAgentStatus queries the "ProvisionedHostToAgentStatus" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToAgentStatus() *AgentStatusQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToAgentStatus(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToAgentStatus(ph) } // QueryProvisionedHostToAgentTask queries the "ProvisionedHostToAgentTask" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToAgentTask() *AgentTaskQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToAgentTask(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToAgentTask(ph) } // QueryProvisionedHostToPlan queries the "ProvisionedHostToPlan" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToPlan() *PlanQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToPlan(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToPlan(ph) } // QueryProvisionedHostToGinFileMiddleware queries the "ProvisionedHostToGinFileMiddleware" edge of the ProvisionedHost entity. func (ph *ProvisionedHost) QueryProvisionedHostToGinFileMiddleware() *GinFileMiddlewareQuery { - return (&ProvisionedHostClient{config: ph.config}).QueryProvisionedHostToGinFileMiddleware(ph) + return NewProvisionedHostClient(ph.config).QueryProvisionedHostToGinFileMiddleware(ph) } // Update returns a builder for updating this ProvisionedHost. // Note that you need to call ProvisionedHost.Unwrap() before calling this method if this ProvisionedHost // was returned from a transaction, and the transaction was committed or rolled back. func (ph *ProvisionedHost) Update() *ProvisionedHostUpdateOne { - return (&ProvisionedHostClient{config: ph.config}).UpdateOne(ph) + return NewProvisionedHostClient(ph.config).UpdateOne(ph) } // Unwrap unwraps the ProvisionedHost entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (ph *ProvisionedHost) Unwrap() *ProvisionedHost { - tx, ok := ph.config.driver.(*txDriver) + _tx, ok := ph.config.driver.(*txDriver) if !ok { panic("ent: ProvisionedHost is not a transactional entity") } - ph.config.driver = tx.drv + ph.config.driver = _tx.drv return ph } @@ -399,24 +409,92 @@ func (ph *ProvisionedHost) Unwrap() *ProvisionedHost { func (ph *ProvisionedHost) String() string { var builder strings.Builder builder.WriteString("ProvisionedHost(") - builder.WriteString(fmt.Sprintf("id=%v", ph.ID)) - builder.WriteString(", subnet_ip=") + builder.WriteString(fmt.Sprintf("id=%v, ", ph.ID)) + builder.WriteString("subnet_ip=") builder.WriteString(ph.SubnetIP) + builder.WriteString(", ") if v := ph.AddonType; v != nil { - builder.WriteString(", addon_type=") + builder.WriteString("addon_type=") builder.WriteString(fmt.Sprintf("%v", *v)) } - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", ph.Vars)) builder.WriteByte(')') return builder.String() } -// ProvisionedHosts is a parsable slice of ProvisionedHost. -type ProvisionedHosts []*ProvisionedHost +// NamedProvisionedHostToProvisioningStep returns the ProvisionedHostToProvisioningStep named value or an error if the edge was not +// loaded in eager-loading with this name. +func (ph *ProvisionedHost) NamedProvisionedHostToProvisioningStep(name string) ([]*ProvisioningStep, error) { + if ph.Edges.namedProvisionedHostToProvisioningStep == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := ph.Edges.namedProvisionedHostToProvisioningStep[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (ph *ProvisionedHost) appendNamedProvisionedHostToProvisioningStep(name string, edges ...*ProvisioningStep) { + if ph.Edges.namedProvisionedHostToProvisioningStep == nil { + ph.Edges.namedProvisionedHostToProvisioningStep = make(map[string][]*ProvisioningStep) + } + if len(edges) == 0 { + ph.Edges.namedProvisionedHostToProvisioningStep[name] = []*ProvisioningStep{} + } else { + ph.Edges.namedProvisionedHostToProvisioningStep[name] = append(ph.Edges.namedProvisionedHostToProvisioningStep[name], edges...) + } +} + +// NamedProvisionedHostToAgentStatus returns the ProvisionedHostToAgentStatus named value or an error if the edge was not +// loaded in eager-loading with this name. +func (ph *ProvisionedHost) NamedProvisionedHostToAgentStatus(name string) ([]*AgentStatus, error) { + if ph.Edges.namedProvisionedHostToAgentStatus == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := ph.Edges.namedProvisionedHostToAgentStatus[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (ph *ProvisionedHost) appendNamedProvisionedHostToAgentStatus(name string, edges ...*AgentStatus) { + if ph.Edges.namedProvisionedHostToAgentStatus == nil { + ph.Edges.namedProvisionedHostToAgentStatus = make(map[string][]*AgentStatus) + } + if len(edges) == 0 { + ph.Edges.namedProvisionedHostToAgentStatus[name] = []*AgentStatus{} + } else { + ph.Edges.namedProvisionedHostToAgentStatus[name] = append(ph.Edges.namedProvisionedHostToAgentStatus[name], edges...) + } +} + +// NamedProvisionedHostToAgentTask returns the ProvisionedHostToAgentTask named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (ph *ProvisionedHost) NamedProvisionedHostToAgentTask(name string) ([]*AgentTask, error) { + if ph.Edges.namedProvisionedHostToAgentTask == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := ph.Edges.namedProvisionedHostToAgentTask[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (ph ProvisionedHosts) config(cfg config) { - for _i := range ph { - ph[_i].config = cfg +func (ph *ProvisionedHost) appendNamedProvisionedHostToAgentTask(name string, edges ...*AgentTask) { + if ph.Edges.namedProvisionedHostToAgentTask == nil { + ph.Edges.namedProvisionedHostToAgentTask = make(map[string][]*AgentTask) + } + if len(edges) == 0 { + ph.Edges.namedProvisionedHostToAgentTask[name] = []*AgentTask{} + } else { + ph.Edges.namedProvisionedHostToAgentTask[name] = append(ph.Edges.namedProvisionedHostToAgentTask[name], edges...) } } + +// ProvisionedHosts is a parsable slice of ProvisionedHost. +type ProvisionedHosts []*ProvisionedHost diff --git a/ent/provisionedhost/provisionedhost.go b/ent/provisionedhost/provisionedhost.go index e4e633f4..139914a3 100755 --- a/ent/provisionedhost/provisionedhost.go +++ b/ent/provisionedhost/provisionedhost.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package provisionedhost @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -176,19 +178,198 @@ func AddonTypeValidator(at AddonType) error { } } +// OrderOption defines the ordering options for the ProvisionedHost queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// BySubnetIP orders the results by the subnet_ip field. +func BySubnetIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSubnetIP, opts...).ToFunc() +} + +// ByAddonType orders the results by the addon_type field. +func ByAddonType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAddonType, opts...).ToFunc() +} + +// ByProvisionedHostToStatusField orders the results by ProvisionedHostToStatus field. +func ByProvisionedHostToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedHostToProvisionedNetworkField orders the results by ProvisionedHostToProvisionedNetwork field. +func ByProvisionedHostToProvisionedNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToProvisionedNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedHostToHostField orders the results by ProvisionedHostToHost field. +func ByProvisionedHostToHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedHostToEndStepPlanField orders the results by ProvisionedHostToEndStepPlan field. 
+func ByProvisionedHostToEndStepPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToEndStepPlanStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedHostToBuildField orders the results by ProvisionedHostToBuild field. +func ByProvisionedHostToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedHostToProvisioningStepCount orders the results by ProvisionedHostToProvisioningStep count. +func ByProvisionedHostToProvisioningStepCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newProvisionedHostToProvisioningStepStep(), opts...) + } +} + +// ByProvisionedHostToProvisioningStep orders the results by ProvisionedHostToProvisioningStep terms. +func ByProvisionedHostToProvisioningStep(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToProvisioningStepStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByProvisionedHostToAgentStatusCount orders the results by ProvisionedHostToAgentStatus count. +func ByProvisionedHostToAgentStatusCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newProvisionedHostToAgentStatusStep(), opts...) + } +} + +// ByProvisionedHostToAgentStatus orders the results by ProvisionedHostToAgentStatus terms. +func ByProvisionedHostToAgentStatus(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToAgentStatusStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByProvisionedHostToAgentTaskCount orders the results by ProvisionedHostToAgentTask count. +func ByProvisionedHostToAgentTaskCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newProvisionedHostToAgentTaskStep(), opts...) + } +} + +// ByProvisionedHostToAgentTask orders the results by ProvisionedHostToAgentTask terms. +func ByProvisionedHostToAgentTask(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToAgentTaskStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByProvisionedHostToPlanField orders the results by ProvisionedHostToPlan field. +func ByProvisionedHostToPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToPlanStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedHostToGinFileMiddlewareField orders the results by ProvisionedHostToGinFileMiddleware field. 
+func ByProvisionedHostToGinFileMiddlewareField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedHostToGinFileMiddlewareStep(), sql.OrderByField(field, opts...)) + } +} +func newProvisionedHostToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, ProvisionedHostToStatusTable, ProvisionedHostToStatusColumn), + ) +} +func newProvisionedHostToProvisionedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToProvisionedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToProvisionedNetworkTable, ProvisionedHostToProvisionedNetworkColumn), + ) +} +func newProvisionedHostToHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToHostTable, ProvisionedHostToHostColumn), + ) +} +func newProvisionedHostToEndStepPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToEndStepPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToEndStepPlanTable, ProvisionedHostToEndStepPlanColumn), + ) +} +func newProvisionedHostToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToBuildTable, ProvisionedHostToBuildColumn), + ) +} +func newProvisionedHostToProvisioningStepStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToProvisioningStepInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToProvisioningStepTable, ProvisionedHostToProvisioningStepColumn), + ) +} +func newProvisionedHostToAgentStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToAgentStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToAgentStatusTable, ProvisionedHostToAgentStatusColumn), + ) +} +func newProvisionedHostToAgentTaskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToAgentTaskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToAgentTaskTable, ProvisionedHostToAgentTaskColumn), + ) +} +func newProvisionedHostToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedHostToPlanTable, ProvisionedHostToPlanColumn), + ) +} +func newProvisionedHostToGinFileMiddlewareStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedHostToGinFileMiddlewareInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedHostToGinFileMiddlewareTable, ProvisionedHostToGinFileMiddlewareColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (at AddonType) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(at.String())) +func (e AddonType) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. 
-func (at *AddonType) UnmarshalGQL(val interface{}) error { +func (e *AddonType) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *at = AddonType(str) - if err := AddonTypeValidator(*at); err != nil { + *e = AddonType(str) + if err := AddonTypeValidator(*e); err != nil { return fmt.Errorf("%s is not a valid AddonType", str) } return nil diff --git a/ent/provisionedhost/where.go b/ent/provisionedhost/where.go index 63834491..0a351b94 100755 --- a/ent/provisionedhost/where.go +++ b/ent/provisionedhost/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package provisionedhost @@ -11,265 +11,147 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.ProvisionedHost(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.ProvisionedHost(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. 
func IDLTE(id uuid.UUID) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.ProvisionedHost(sql.FieldLTE(FieldID, id)) } // SubnetIP applies equality check predicate on the "subnet_ip" field. It's identical to SubnetIPEQ. func SubnetIP(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldEQ(FieldSubnetIP, v)) } // SubnetIPEQ applies the EQ predicate on the "subnet_ip" field. func SubnetIPEQ(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldEQ(FieldSubnetIP, v)) } // SubnetIPNEQ applies the NEQ predicate on the "subnet_ip" field. func SubnetIPNEQ(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldNEQ(FieldSubnetIP, v)) } // SubnetIPIn applies the In predicate on the "subnet_ip" field. func SubnetIPIn(vs ...string) predicate.ProvisionedHost { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedHost(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSubnetIP), v...)) - }) + return predicate.ProvisionedHost(sql.FieldIn(FieldSubnetIP, vs...)) } // SubnetIPNotIn applies the NotIn predicate on the "subnet_ip" field. func SubnetIPNotIn(vs ...string) predicate.ProvisionedHost { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedHost(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSubnetIP), v...)) - }) + return predicate.ProvisionedHost(sql.FieldNotIn(FieldSubnetIP, vs...)) } // SubnetIPGT applies the GT predicate on the "subnet_ip" field. func SubnetIPGT(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldGT(FieldSubnetIP, v)) } // SubnetIPGTE applies the GTE predicate on the "subnet_ip" field. func SubnetIPGTE(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldGTE(FieldSubnetIP, v)) } // SubnetIPLT applies the LT predicate on the "subnet_ip" field. func SubnetIPLT(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldLT(FieldSubnetIP, v)) } // SubnetIPLTE applies the LTE predicate on the "subnet_ip" field. func SubnetIPLTE(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldLTE(FieldSubnetIP, v)) } // SubnetIPContains applies the Contains predicate on the "subnet_ip" field. 
func SubnetIPContains(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldContains(FieldSubnetIP, v)) } // SubnetIPHasPrefix applies the HasPrefix predicate on the "subnet_ip" field. func SubnetIPHasPrefix(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldHasPrefix(FieldSubnetIP, v)) } // SubnetIPHasSuffix applies the HasSuffix predicate on the "subnet_ip" field. func SubnetIPHasSuffix(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldHasSuffix(FieldSubnetIP, v)) } // SubnetIPEqualFold applies the EqualFold predicate on the "subnet_ip" field. func SubnetIPEqualFold(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldEqualFold(FieldSubnetIP, v)) } // SubnetIPContainsFold applies the ContainsFold predicate on the "subnet_ip" field. func SubnetIPContainsFold(v string) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSubnetIP), v)) - }) + return predicate.ProvisionedHost(sql.FieldContainsFold(FieldSubnetIP, v)) } // AddonTypeEQ applies the EQ predicate on the "addon_type" field. func AddonTypeEQ(v AddonType) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAddonType), v)) - }) + return predicate.ProvisionedHost(sql.FieldEQ(FieldAddonType, v)) } // AddonTypeNEQ applies the NEQ predicate on the "addon_type" field. func AddonTypeNEQ(v AddonType) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAddonType), v)) - }) + return predicate.ProvisionedHost(sql.FieldNEQ(FieldAddonType, v)) } // AddonTypeIn applies the In predicate on the "addon_type" field. func AddonTypeIn(vs ...AddonType) predicate.ProvisionedHost { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedHost(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldAddonType), v...)) - }) + return predicate.ProvisionedHost(sql.FieldIn(FieldAddonType, vs...)) } // AddonTypeNotIn applies the NotIn predicate on the "addon_type" field. func AddonTypeNotIn(vs ...AddonType) predicate.ProvisionedHost { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedHost(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldAddonType), v...)) - }) + return predicate.ProvisionedHost(sql.FieldNotIn(FieldAddonType, vs...)) } // AddonTypeIsNil applies the IsNil predicate on the "addon_type" field. 
func AddonTypeIsNil() predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAddonType))) - }) + return predicate.ProvisionedHost(sql.FieldIsNull(FieldAddonType)) } // AddonTypeNotNil applies the NotNil predicate on the "addon_type" field. func AddonTypeNotNil() predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAddonType))) - }) + return predicate.ProvisionedHost(sql.FieldNotNull(FieldAddonType)) } // HasProvisionedHostToStatus applies the HasEdge predicate on the "ProvisionedHostToStatus" edge. @@ -277,7 +159,6 @@ func HasProvisionedHostToStatus() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, ProvisionedHostToStatusTable, ProvisionedHostToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -287,11 +168,7 @@ func HasProvisionedHostToStatus() predicate.ProvisionedHost { // HasProvisionedHostToStatusWith applies the HasEdge predicate on the "ProvisionedHostToStatus" edge with a given conditions (other predicates). func HasProvisionedHostToStatusWith(preds ...predicate.Status) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, ProvisionedHostToStatusTable, ProvisionedHostToStatusColumn), - ) + step := newProvisionedHostToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -305,7 +182,6 @@ func HasProvisionedHostToProvisionedNetwork() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToProvisionedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToProvisionedNetworkTable, ProvisionedHostToProvisionedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -315,11 +191,7 @@ func HasProvisionedHostToProvisionedNetwork() predicate.ProvisionedHost { // HasProvisionedHostToProvisionedNetworkWith applies the HasEdge predicate on the "ProvisionedHostToProvisionedNetwork" edge with a given conditions (other predicates). 
func HasProvisionedHostToProvisionedNetworkWith(preds ...predicate.ProvisionedNetwork) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToProvisionedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToProvisionedNetworkTable, ProvisionedHostToProvisionedNetworkColumn), - ) + step := newProvisionedHostToProvisionedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -333,7 +205,6 @@ func HasProvisionedHostToHost() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToHostTable, ProvisionedHostToHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -343,11 +214,7 @@ func HasProvisionedHostToHost() predicate.ProvisionedHost { // HasProvisionedHostToHostWith applies the HasEdge predicate on the "ProvisionedHostToHost" edge with a given conditions (other predicates). func HasProvisionedHostToHostWith(preds ...predicate.Host) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToHostTable, ProvisionedHostToHostColumn), - ) + step := newProvisionedHostToHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -361,7 +228,6 @@ func HasProvisionedHostToEndStepPlan() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToEndStepPlanTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToEndStepPlanTable, ProvisionedHostToEndStepPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -371,11 +237,7 @@ func HasProvisionedHostToEndStepPlan() predicate.ProvisionedHost { // HasProvisionedHostToEndStepPlanWith applies the HasEdge predicate on the "ProvisionedHostToEndStepPlan" edge with a given conditions (other predicates). func HasProvisionedHostToEndStepPlanWith(preds ...predicate.Plan) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToEndStepPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToEndStepPlanTable, ProvisionedHostToEndStepPlanColumn), - ) + step := newProvisionedHostToEndStepPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -389,7 +251,6 @@ func HasProvisionedHostToBuild() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToBuildTable, ProvisionedHostToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -399,11 +260,7 @@ func HasProvisionedHostToBuild() predicate.ProvisionedHost { // HasProvisionedHostToBuildWith applies the HasEdge predicate on the "ProvisionedHostToBuild" edge with a given conditions (other predicates). 
func HasProvisionedHostToBuildWith(preds ...predicate.Build) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedHostToBuildTable, ProvisionedHostToBuildColumn), - ) + step := newProvisionedHostToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -417,7 +274,6 @@ func HasProvisionedHostToProvisioningStep() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToProvisioningStepTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToProvisioningStepTable, ProvisionedHostToProvisioningStepColumn), ) sqlgraph.HasNeighbors(s, step) @@ -427,11 +283,7 @@ func HasProvisionedHostToProvisioningStep() predicate.ProvisionedHost { // HasProvisionedHostToProvisioningStepWith applies the HasEdge predicate on the "ProvisionedHostToProvisioningStep" edge with a given conditions (other predicates). func HasProvisionedHostToProvisioningStepWith(preds ...predicate.ProvisioningStep) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToProvisioningStepInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToProvisioningStepTable, ProvisionedHostToProvisioningStepColumn), - ) + step := newProvisionedHostToProvisioningStepStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -445,7 +297,6 @@ func HasProvisionedHostToAgentStatus() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToAgentStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToAgentStatusTable, ProvisionedHostToAgentStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -455,11 +306,7 @@ func HasProvisionedHostToAgentStatus() predicate.ProvisionedHost { // HasProvisionedHostToAgentStatusWith applies the HasEdge predicate on the "ProvisionedHostToAgentStatus" edge with a given conditions (other predicates). func HasProvisionedHostToAgentStatusWith(preds ...predicate.AgentStatus) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToAgentStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToAgentStatusTable, ProvisionedHostToAgentStatusColumn), - ) + step := newProvisionedHostToAgentStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -473,7 +320,6 @@ func HasProvisionedHostToAgentTask() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToAgentTaskTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToAgentTaskTable, ProvisionedHostToAgentTaskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -483,11 +329,7 @@ func HasProvisionedHostToAgentTask() predicate.ProvisionedHost { // HasProvisionedHostToAgentTaskWith applies the HasEdge predicate on the "ProvisionedHostToAgentTask" edge with a given conditions (other predicates). 
func HasProvisionedHostToAgentTaskWith(preds ...predicate.AgentTask) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToAgentTaskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedHostToAgentTaskTable, ProvisionedHostToAgentTaskColumn), - ) + step := newProvisionedHostToAgentTaskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -501,7 +343,6 @@ func HasProvisionedHostToPlan() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedHostToPlanTable, ProvisionedHostToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -511,11 +352,7 @@ func HasProvisionedHostToPlan() predicate.ProvisionedHost { // HasProvisionedHostToPlanWith applies the HasEdge predicate on the "ProvisionedHostToPlan" edge with a given conditions (other predicates). func HasProvisionedHostToPlanWith(preds ...predicate.Plan) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedHostToPlanTable, ProvisionedHostToPlanColumn), - ) + step := newProvisionedHostToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -529,7 +366,6 @@ func HasProvisionedHostToGinFileMiddleware() predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToGinFileMiddlewareTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedHostToGinFileMiddlewareTable, ProvisionedHostToGinFileMiddlewareColumn), ) sqlgraph.HasNeighbors(s, step) @@ -539,11 +375,7 @@ func HasProvisionedHostToGinFileMiddleware() predicate.ProvisionedHost { // HasProvisionedHostToGinFileMiddlewareWith applies the HasEdge predicate on the "ProvisionedHostToGinFileMiddleware" edge with a given conditions (other predicates). func HasProvisionedHostToGinFileMiddlewareWith(preds ...predicate.GinFileMiddleware) predicate.ProvisionedHost { return predicate.ProvisionedHost(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedHostToGinFileMiddlewareInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedHostToGinFileMiddlewareTable, ProvisionedHostToGinFileMiddlewareColumn), - ) + step := newProvisionedHostToGinFileMiddlewareStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -554,32 +386,15 @@ func HasProvisionedHostToGinFileMiddlewareWith(preds ...predicate.GinFileMiddlew // And groups predicates with the AND operator between them. func And(predicates ...predicate.ProvisionedHost) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ProvisionedHost(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.ProvisionedHost) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ProvisionedHost(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.ProvisionedHost) predicate.ProvisionedHost { - return predicate.ProvisionedHost(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ProvisionedHost(sql.NotPredicates(p)) } diff --git a/ent/provisionedhost_create.go b/ent/provisionedhost_create.go index 5ee375a5..aba493a2 100755 --- a/ent/provisionedhost_create.go +++ b/ent/provisionedhost_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -222,44 +222,8 @@ func (phc *ProvisionedHostCreate) Mutation() *ProvisionedHostMutation { // Save creates the ProvisionedHost in the database. func (phc *ProvisionedHostCreate) Save(ctx context.Context) (*ProvisionedHost, error) { - var ( - err error - node *ProvisionedHost - ) phc.defaults() - if len(phc.hooks) == 0 { - if err = phc.check(); err != nil { - return nil, err - } - node, err = phc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedHostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = phc.check(); err != nil { - return nil, err - } - phc.mutation = mutation - if node, err = phc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(phc.hooks) - 1; i >= 0; i-- { - if phc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = phc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, phc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, phc.sqlSave, phc.mutation, phc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -321,10 +285,13 @@ func (phc *ProvisionedHostCreate) check() error { } func (phc *ProvisionedHostCreate) sqlSave(ctx context.Context) (*ProvisionedHost, error) { + if err := phc.check(); err != nil { + return nil, err + } _node, _spec := phc.createSpec() if err := sqlgraph.CreateNode(ctx, phc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -335,46 +302,30 @@ func (phc *ProvisionedHostCreate) sqlSave(ctx context.Context) (*ProvisionedHost return nil, err } } + phc.mutation.id = &_node.ID + phc.mutation.done = true return _node, nil } func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.CreateSpec) { var ( _node = &ProvisionedHost{config: phc.config} - _spec = &sqlgraph.CreateSpec{ - Table: provisionedhost.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(provisionedhost.Table, sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID)) ) if id, ok := phc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := phc.mutation.SubnetIP(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionedhost.FieldSubnetIP, - }) + _spec.SetField(provisionedhost.FieldSubnetIP, field.TypeString, value) _node.SubnetIP = value } if value, ok := phc.mutation.AddonType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: provisionedhost.FieldAddonType, - }) + _spec.SetField(provisionedhost.FieldAddonType, field.TypeEnum, value) _node.AddonType = &value } if value, ok := phc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: provisionedhost.FieldVars, - }) + _spec.SetField(provisionedhost.FieldVars, field.TypeJSON, value) _node.Vars = value } if nodes := phc.mutation.ProvisionedHostToStatusIDs(); len(nodes) > 0 { @@ -385,10 +336,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -404,10 +352,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -424,10 +369,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -444,10 +386,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToEndStepPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -464,10 +403,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -484,10 +420,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -503,10 +436,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -522,10 +452,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -541,10 +468,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -561,10 +485,7 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea Columns: []string{provisionedhost.ProvisionedHostToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -579,11 +500,15 @@ func (phc *ProvisionedHostCreate) createSpec() (*ProvisionedHost, *sqlgraph.Crea // ProvisionedHostCreateBulk is the builder for creating many ProvisionedHost entities in bulk. type ProvisionedHostCreateBulk struct { config + err error builders []*ProvisionedHostCreate } // Save creates the ProvisionedHost entities in the database. 
func (phcb *ProvisionedHostCreateBulk) Save(ctx context.Context) ([]*ProvisionedHost, error) { + if phcb.err != nil { + return nil, phcb.err + } specs := make([]*sqlgraph.CreateSpec, len(phcb.builders)) nodes := make([]*ProvisionedHost, len(phcb.builders)) mutators := make([]Mutator, len(phcb.builders)) @@ -600,8 +525,8 @@ func (phcb *ProvisionedHostCreateBulk) Save(ctx context.Context) ([]*Provisioned return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, phcb.builders[i+1].mutation) } else { @@ -609,7 +534,7 @@ func (phcb *ProvisionedHostCreateBulk) Save(ctx context.Context) ([]*Provisioned // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, phcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/provisionedhost_delete.go b/ent/provisionedhost_delete.go index d65375f0..83d284de 100755 --- a/ent/provisionedhost_delete.go +++ b/ent/provisionedhost_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (phd *ProvisionedHostDelete) Where(ps ...predicate.ProvisionedHost) *Provis // Exec executes the deletion query and returns how many vertices were deleted. func (phd *ProvisionedHostDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(phd.hooks) == 0 { - affected, err = phd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedHostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - phd.mutation = mutation - affected, err = phd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(phd.hooks) - 1; i >= 0; i-- { - if phd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = phd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, phd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, phd.sqlExec, phd.mutation, phd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
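The Save and Exec rewrites here and in provisionedhost_create.go collapse the old hand-rolled mutator chain into the shared withHooks helper; hooks registered on the client are still applied. A minimal sketch, assuming the usual generated Use method and the standard entgo.io/ent hook types (the log message is illustrative):

package example

import (
	"context"
	"log"

	entsdk "entgo.io/ent"

	"github.com/gen0cide/laforge/ent"
)

// registerAuditHook wraps every ProvisionedHost mutation (create, update,
// delete) with a log line; withHooks runs it just as the old inline chain did.
func registerAuditHook(client *ent.Client) {
	client.ProvisionedHost.Use(func(next entsdk.Mutator) entsdk.Mutator {
		return entsdk.MutateFunc(func(ctx context.Context, m entsdk.Mutation) (entsdk.Value, error) {
			log.Printf("provisioned host mutation: op=%v", m.Op())
			return next.Mutate(ctx, m)
		})
	})
}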
@@ -68,15 +40,7 @@ func (phd *ProvisionedHostDelete) ExecX(ctx context.Context) int { } func (phd *ProvisionedHostDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionedhost.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(provisionedhost.Table, sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID)) if ps := phd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (phd *ProvisionedHostDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, phd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, phd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + phd.mutation.done = true + return affected, err } // ProvisionedHostDeleteOne is the builder for deleting a single ProvisionedHost entity. @@ -92,6 +61,12 @@ type ProvisionedHostDeleteOne struct { phd *ProvisionedHostDelete } +// Where appends a list predicates to the ProvisionedHostDelete builder. +func (phdo *ProvisionedHostDeleteOne) Where(ps ...predicate.ProvisionedHost) *ProvisionedHostDeleteOne { + phdo.phd.mutation.Where(ps...) + return phdo +} + // Exec executes the deletion query. func (phdo *ProvisionedHostDeleteOne) Exec(ctx context.Context) error { n, err := phdo.phd.Exec(ctx) @@ -107,5 +82,7 @@ func (phdo *ProvisionedHostDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (phdo *ProvisionedHostDeleteOne) ExecX(ctx context.Context) { - phdo.phd.ExecX(ctx) + if err := phdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/provisionedhost_query.go b/ent/provisionedhost_query.go index e0cc24bd..c6cb5bca 100755 --- a/ent/provisionedhost_query.go +++ b/ent/provisionedhost_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -29,24 +28,26 @@ import ( // ProvisionedHostQuery is the builder for querying ProvisionedHost entities. type ProvisionedHostQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.ProvisionedHost - // eager-loading edges. 
- withProvisionedHostToStatus *StatusQuery - withProvisionedHostToProvisionedNetwork *ProvisionedNetworkQuery - withProvisionedHostToHost *HostQuery - withProvisionedHostToEndStepPlan *PlanQuery - withProvisionedHostToBuild *BuildQuery - withProvisionedHostToProvisioningStep *ProvisioningStepQuery - withProvisionedHostToAgentStatus *AgentStatusQuery - withProvisionedHostToAgentTask *AgentTaskQuery - withProvisionedHostToPlan *PlanQuery - withProvisionedHostToGinFileMiddleware *GinFileMiddlewareQuery - withFKs bool + ctx *QueryContext + order []provisionedhost.OrderOption + inters []Interceptor + predicates []predicate.ProvisionedHost + withProvisionedHostToStatus *StatusQuery + withProvisionedHostToProvisionedNetwork *ProvisionedNetworkQuery + withProvisionedHostToHost *HostQuery + withProvisionedHostToEndStepPlan *PlanQuery + withProvisionedHostToBuild *BuildQuery + withProvisionedHostToProvisioningStep *ProvisioningStepQuery + withProvisionedHostToAgentStatus *AgentStatusQuery + withProvisionedHostToAgentTask *AgentTaskQuery + withProvisionedHostToPlan *PlanQuery + withProvisionedHostToGinFileMiddleware *GinFileMiddlewareQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*ProvisionedHost) error + withNamedProvisionedHostToProvisioningStep map[string]*ProvisioningStepQuery + withNamedProvisionedHostToAgentStatus map[string]*AgentStatusQuery + withNamedProvisionedHostToAgentTask map[string]*AgentTaskQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -58,34 +59,34 @@ func (phq *ProvisionedHostQuery) Where(ps ...predicate.ProvisionedHost) *Provisi return phq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (phq *ProvisionedHostQuery) Limit(limit int) *ProvisionedHostQuery { - phq.limit = &limit + phq.ctx.Limit = &limit return phq } -// Offset adds an offset step to the query. +// Offset to start from. func (phq *ProvisionedHostQuery) Offset(offset int) *ProvisionedHostQuery { - phq.offset = &offset + phq.ctx.Offset = &offset return phq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (phq *ProvisionedHostQuery) Unique(unique bool) *ProvisionedHostQuery { - phq.unique = &unique + phq.ctx.Unique = &unique return phq } -// Order adds an order step to the query. -func (phq *ProvisionedHostQuery) Order(o ...OrderFunc) *ProvisionedHostQuery { +// Order specifies how the records should be ordered. +func (phq *ProvisionedHostQuery) Order(o ...provisionedhost.OrderOption) *ProvisionedHostQuery { phq.order = append(phq.order, o...) return phq } // QueryProvisionedHostToStatus chains the current query on the "ProvisionedHostToStatus" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToStatus() *StatusQuery { - query := &StatusQuery{config: phq.config} + query := (&StatusClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -107,7 +108,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToStatus() *StatusQuery { // QueryProvisionedHostToProvisionedNetwork chains the current query on the "ProvisionedHostToProvisionedNetwork" edge. 
func (phq *ProvisionedHostQuery) QueryProvisionedHostToProvisionedNetwork() *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: phq.config} + query := (&ProvisionedNetworkClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -129,7 +130,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToProvisionedNetwork() *Pro // QueryProvisionedHostToHost chains the current query on the "ProvisionedHostToHost" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToHost() *HostQuery { - query := &HostQuery{config: phq.config} + query := (&HostClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -151,7 +152,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToHost() *HostQuery { // QueryProvisionedHostToEndStepPlan chains the current query on the "ProvisionedHostToEndStepPlan" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToEndStepPlan() *PlanQuery { - query := &PlanQuery{config: phq.config} + query := (&PlanClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -173,7 +174,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToEndStepPlan() *PlanQuery // QueryProvisionedHostToBuild chains the current query on the "ProvisionedHostToBuild" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToBuild() *BuildQuery { - query := &BuildQuery{config: phq.config} + query := (&BuildClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -195,7 +196,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToBuild() *BuildQuery { // QueryProvisionedHostToProvisioningStep chains the current query on the "ProvisionedHostToProvisioningStep" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToProvisioningStep() *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: phq.config} + query := (&ProvisioningStepClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -217,7 +218,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToProvisioningStep() *Provi // QueryProvisionedHostToAgentStatus chains the current query on the "ProvisionedHostToAgentStatus" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToAgentStatus() *AgentStatusQuery { - query := &AgentStatusQuery{config: phq.config} + query := (&AgentStatusClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -239,7 +240,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToAgentStatus() *AgentStatu // QueryProvisionedHostToAgentTask chains the current query on the "ProvisionedHostToAgentTask" edge. 
func (phq *ProvisionedHostQuery) QueryProvisionedHostToAgentTask() *AgentTaskQuery { - query := &AgentTaskQuery{config: phq.config} + query := (&AgentTaskClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -261,7 +262,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToAgentTask() *AgentTaskQue // QueryProvisionedHostToPlan chains the current query on the "ProvisionedHostToPlan" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToPlan() *PlanQuery { - query := &PlanQuery{config: phq.config} + query := (&PlanClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -283,7 +284,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToPlan() *PlanQuery { // QueryProvisionedHostToGinFileMiddleware chains the current query on the "ProvisionedHostToGinFileMiddleware" edge. func (phq *ProvisionedHostQuery) QueryProvisionedHostToGinFileMiddleware() *GinFileMiddlewareQuery { - query := &GinFileMiddlewareQuery{config: phq.config} + query := (&GinFileMiddlewareClient{config: phq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := phq.prepareQuery(ctx); err != nil { return nil, err @@ -306,7 +307,7 @@ func (phq *ProvisionedHostQuery) QueryProvisionedHostToGinFileMiddleware() *GinF // First returns the first ProvisionedHost entity from the query. // Returns a *NotFoundError when no ProvisionedHost was found. func (phq *ProvisionedHostQuery) First(ctx context.Context) (*ProvisionedHost, error) { - nodes, err := phq.Limit(1).All(ctx) + nodes, err := phq.Limit(1).All(setContextOp(ctx, phq.ctx, "First")) if err != nil { return nil, err } @@ -329,7 +330,7 @@ func (phq *ProvisionedHostQuery) FirstX(ctx context.Context) *ProvisionedHost { // Returns a *NotFoundError when no ProvisionedHost ID was found. func (phq *ProvisionedHostQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = phq.Limit(1).IDs(ctx); err != nil { + if ids, err = phq.Limit(1).IDs(setContextOp(ctx, phq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -352,7 +353,7 @@ func (phq *ProvisionedHostQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one ProvisionedHost entity is found. // Returns a *NotFoundError when no ProvisionedHost entities are found. func (phq *ProvisionedHostQuery) Only(ctx context.Context) (*ProvisionedHost, error) { - nodes, err := phq.Limit(2).All(ctx) + nodes, err := phq.Limit(2).All(setContextOp(ctx, phq.ctx, "Only")) if err != nil { return nil, err } @@ -380,7 +381,7 @@ func (phq *ProvisionedHostQuery) OnlyX(ctx context.Context) *ProvisionedHost { // Returns a *NotFoundError when no entities are found. func (phq *ProvisionedHostQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = phq.Limit(2).IDs(ctx); err != nil { + if ids, err = phq.Limit(2).IDs(setContextOp(ctx, phq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -405,10 +406,12 @@ func (phq *ProvisionedHostQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of ProvisionedHosts. 
func (phq *ProvisionedHostQuery) All(ctx context.Context) ([]*ProvisionedHost, error) { + ctx = setContextOp(ctx, phq.ctx, "All") if err := phq.prepareQuery(ctx); err != nil { return nil, err } - return phq.sqlAll(ctx) + qr := querierAll[[]*ProvisionedHost, *ProvisionedHostQuery]() + return withInterceptors[[]*ProvisionedHost](ctx, phq, qr, phq.inters) } // AllX is like All, but panics if an error occurs. @@ -421,9 +424,12 @@ func (phq *ProvisionedHostQuery) AllX(ctx context.Context) []*ProvisionedHost { } // IDs executes the query and returns a list of ProvisionedHost IDs. -func (phq *ProvisionedHostQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := phq.Select(provisionedhost.FieldID).Scan(ctx, &ids); err != nil { +func (phq *ProvisionedHostQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if phq.ctx.Unique == nil && phq.path != nil { + phq.Unique(true) + } + ctx = setContextOp(ctx, phq.ctx, "IDs") + if err = phq.Select(provisionedhost.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -440,10 +446,11 @@ func (phq *ProvisionedHostQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (phq *ProvisionedHostQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, phq.ctx, "Count") if err := phq.prepareQuery(ctx); err != nil { return 0, err } - return phq.sqlCount(ctx) + return withInterceptors[int](ctx, phq, querierCount[*ProvisionedHostQuery](), phq.inters) } // CountX is like Count, but panics if an error occurs. @@ -457,10 +464,15 @@ func (phq *ProvisionedHostQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (phq *ProvisionedHostQuery) Exist(ctx context.Context) (bool, error) { - if err := phq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, phq.ctx, "Exist") + switch _, err := phq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return phq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -480,9 +492,9 @@ func (phq *ProvisionedHostQuery) Clone() *ProvisionedHostQuery { } return &ProvisionedHostQuery{ config: phq.config, - limit: phq.limit, - offset: phq.offset, - order: append([]OrderFunc{}, phq.order...), + ctx: phq.ctx.Clone(), + order: append([]provisionedhost.OrderOption{}, phq.order...), + inters: append([]Interceptor{}, phq.inters...), predicates: append([]predicate.ProvisionedHost{}, phq.predicates...), withProvisionedHostToStatus: phq.withProvisionedHostToStatus.Clone(), withProvisionedHostToProvisionedNetwork: phq.withProvisionedHostToProvisionedNetwork.Clone(), @@ -495,16 +507,15 @@ func (phq *ProvisionedHostQuery) Clone() *ProvisionedHostQuery { withProvisionedHostToPlan: phq.withProvisionedHostToPlan.Clone(), withProvisionedHostToGinFileMiddleware: phq.withProvisionedHostToGinFileMiddleware.Clone(), // clone intermediate query. - sql: phq.sql.Clone(), - path: phq.path, - unique: phq.unique, + sql: phq.sql.Clone(), + path: phq.path, } } // WithProvisionedHostToStatus tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToStatus" edge. The optional arguments are used to configure the query builder of the edge. 
func (phq *ProvisionedHostQuery) WithProvisionedHostToStatus(opts ...func(*StatusQuery)) *ProvisionedHostQuery { - query := &StatusQuery{config: phq.config} + query := (&StatusClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -515,7 +526,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToStatus(opts ...func(*Statu // WithProvisionedHostToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToProvisionedNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToProvisionedNetwork(opts ...func(*ProvisionedNetworkQuery)) *ProvisionedHostQuery { - query := &ProvisionedNetworkQuery{config: phq.config} + query := (&ProvisionedNetworkClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -526,7 +537,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToProvisionedNetwork(opts .. // WithProvisionedHostToHost tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToHost" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToHost(opts ...func(*HostQuery)) *ProvisionedHostQuery { - query := &HostQuery{config: phq.config} + query := (&HostClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -537,7 +548,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToHost(opts ...func(*HostQue // WithProvisionedHostToEndStepPlan tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToEndStepPlan" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToEndStepPlan(opts ...func(*PlanQuery)) *ProvisionedHostQuery { - query := &PlanQuery{config: phq.config} + query := (&PlanClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -548,7 +559,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToEndStepPlan(opts ...func(* // WithProvisionedHostToBuild tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToBuild(opts ...func(*BuildQuery)) *ProvisionedHostQuery { - query := &BuildQuery{config: phq.config} + query := (&BuildClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -559,7 +570,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToBuild(opts ...func(*BuildQ // WithProvisionedHostToProvisioningStep tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToProvisioningStep" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToProvisioningStep(opts ...func(*ProvisioningStepQuery)) *ProvisionedHostQuery { - query := &ProvisioningStepQuery{config: phq.config} + query := (&ProvisioningStepClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -570,7 +581,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToProvisioningStep(opts ...f // WithProvisionedHostToAgentStatus tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToAgentStatus" edge. The optional arguments are used to configure the query builder of the edge. 
func (phq *ProvisionedHostQuery) WithProvisionedHostToAgentStatus(opts ...func(*AgentStatusQuery)) *ProvisionedHostQuery { - query := &AgentStatusQuery{config: phq.config} + query := (&AgentStatusClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -581,7 +592,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToAgentStatus(opts ...func(* // WithProvisionedHostToAgentTask tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToAgentTask" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToAgentTask(opts ...func(*AgentTaskQuery)) *ProvisionedHostQuery { - query := &AgentTaskQuery{config: phq.config} + query := (&AgentTaskClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -592,7 +603,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToAgentTask(opts ...func(*Ag // WithProvisionedHostToPlan tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToPlan(opts ...func(*PlanQuery)) *ProvisionedHostQuery { - query := &PlanQuery{config: phq.config} + query := (&PlanClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -603,7 +614,7 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToPlan(opts ...func(*PlanQue // WithProvisionedHostToGinFileMiddleware tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedHostToGinFileMiddleware" edge. The optional arguments are used to configure the query builder of the edge. func (phq *ProvisionedHostQuery) WithProvisionedHostToGinFileMiddleware(opts ...func(*GinFileMiddlewareQuery)) *ProvisionedHostQuery { - query := &GinFileMiddlewareQuery{config: phq.config} + query := (&GinFileMiddlewareClient{config: phq.config}).Query() for _, opt := range opts { opt(query) } @@ -625,17 +636,13 @@ func (phq *ProvisionedHostQuery) WithProvisionedHostToGinFileMiddleware(opts ... // GroupBy(provisionedhost.FieldSubnetIP). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (phq *ProvisionedHostQuery) GroupBy(field string, fields ...string) *ProvisionedHostGroupBy { - group := &ProvisionedHostGroupBy{config: phq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := phq.prepareQuery(ctx); err != nil { - return nil, err - } - return phq.sqlQuery(ctx), nil - } - return group + phq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProvisionedHostGroupBy{build: phq} + grbuild.flds = &phq.ctx.Fields + grbuild.label = provisionedhost.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -650,14 +657,31 @@ func (phq *ProvisionedHostQuery) GroupBy(field string, fields ...string) *Provis // client.ProvisionedHost.Query(). // Select(provisionedhost.FieldSubnetIP). // Scan(ctx, &v) -// func (phq *ProvisionedHostQuery) Select(fields ...string) *ProvisionedHostSelect { - phq.fields = append(phq.fields, fields...) - return &ProvisionedHostSelect{ProvisionedHostQuery: phq} + phq.ctx.Fields = append(phq.ctx.Fields, fields...) 
+ sbuild := &ProvisionedHostSelect{ProvisionedHostQuery: phq} + sbuild.label = provisionedhost.Label + sbuild.flds, sbuild.scan = &phq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ProvisionedHostSelect configured with the given aggregations. +func (phq *ProvisionedHostQuery) Aggregate(fns ...AggregateFunc) *ProvisionedHostSelect { + return phq.Select().Aggregate(fns...) } func (phq *ProvisionedHostQuery) prepareQuery(ctx context.Context) error { - for _, f := range phq.fields { + for _, inter := range phq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, phq); err != nil { + return err + } + } + } + for _, f := range phq.ctx.Fields { if !provisionedhost.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -672,7 +696,7 @@ func (phq *ProvisionedHostQuery) prepareQuery(ctx context.Context) error { return nil } -func (phq *ProvisionedHostQuery) sqlAll(ctx context.Context) ([]*ProvisionedHost, error) { +func (phq *ProvisionedHostQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ProvisionedHost, error) { var ( nodes = []*ProvisionedHost{} withFKs = phq.withFKs @@ -696,352 +720,460 @@ func (phq *ProvisionedHostQuery) sqlAll(ctx context.Context) ([]*ProvisionedHost if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, provisionedhost.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ProvisionedHost).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &ProvisionedHost{config: phq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(phq.modifiers) > 0 { + _spec.Modifiers = phq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, phq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := phq.withProvisionedHostToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisionedHost) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(provisionedhost.ProvisionedHostToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := phq.loadProvisionedHostToStatus(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *Status) { n.Edges.ProvisionedHostToStatus = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.provisioned_host_provisioned_host_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioned_host_provisioned_host_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisionedHostToStatus = n - } } - if query := phq.withProvisionedHostToProvisionedNetwork; query != nil { - ids := 
make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedHost) - for i := range nodes { - if nodes[i].provisioned_host_provisioned_host_to_provisioned_network == nil { - continue - } - fk := *nodes[i].provisioned_host_provisioned_host_to_provisioned_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := phq.loadProvisionedHostToProvisionedNetwork(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *ProvisionedNetwork) { n.Edges.ProvisionedHostToProvisionedNetwork = e }); err != nil { + return nil, err } - query.Where(provisionednetwork.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := phq.withProvisionedHostToHost; query != nil { + if err := phq.loadProvisionedHostToHost(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *Host) { n.Edges.ProvisionedHostToHost = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_provisioned_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedHostToProvisionedNetwork = n - } + } + if query := phq.withProvisionedHostToEndStepPlan; query != nil { + if err := phq.loadProvisionedHostToEndStepPlan(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *Plan) { n.Edges.ProvisionedHostToEndStepPlan = e }); err != nil { + return nil, err } } - - if query := phq.withProvisionedHostToHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedHost) - for i := range nodes { - if nodes[i].provisioned_host_provisioned_host_to_host == nil { - continue - } - fk := *nodes[i].provisioned_host_provisioned_host_to_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := phq.withProvisionedHostToBuild; query != nil { + if err := phq.loadProvisionedHostToBuild(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *Build) { n.Edges.ProvisionedHostToBuild = e }); err != nil { + return nil, err } - query.Where(host.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := phq.withProvisionedHostToProvisioningStep; query != nil { + if err := phq.loadProvisionedHostToProvisioningStep(ctx, query, nodes, + func(n *ProvisionedHost) { n.Edges.ProvisionedHostToProvisioningStep = []*ProvisioningStep{} }, + func(n *ProvisionedHost, e *ProvisioningStep) { + n.Edges.ProvisionedHostToProvisioningStep = append(n.Edges.ProvisionedHostToProvisioningStep, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedHostToHost = n - } + } + if query := phq.withProvisionedHostToAgentStatus; query != nil { + if err := phq.loadProvisionedHostToAgentStatus(ctx, query, nodes, + func(n *ProvisionedHost) { n.Edges.ProvisionedHostToAgentStatus = []*AgentStatus{} }, + func(n *ProvisionedHost, e *AgentStatus) { + n.Edges.ProvisionedHostToAgentStatus = append(n.Edges.ProvisionedHostToAgentStatus, e) + }); err != nil { + return nil, err } } - - if query := phq.withProvisionedHostToEndStepPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedHost) - for i := range nodes { - if 
nodes[i].provisioned_host_provisioned_host_to_end_step_plan == nil { - continue - } - fk := *nodes[i].provisioned_host_provisioned_host_to_end_step_plan - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := phq.withProvisionedHostToAgentTask; query != nil { + if err := phq.loadProvisionedHostToAgentTask(ctx, query, nodes, + func(n *ProvisionedHost) { n.Edges.ProvisionedHostToAgentTask = []*AgentTask{} }, + func(n *ProvisionedHost, e *AgentTask) { + n.Edges.ProvisionedHostToAgentTask = append(n.Edges.ProvisionedHostToAgentTask, e) + }); err != nil { + return nil, err } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := phq.withProvisionedHostToPlan; query != nil { + if err := phq.loadProvisionedHostToPlan(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *Plan) { n.Edges.ProvisionedHostToPlan = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_end_step_plan" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedHostToEndStepPlan = n - } + } + if query := phq.withProvisionedHostToGinFileMiddleware; query != nil { + if err := phq.loadProvisionedHostToGinFileMiddleware(ctx, query, nodes, nil, + func(n *ProvisionedHost, e *GinFileMiddleware) { n.Edges.ProvisionedHostToGinFileMiddleware = e }); err != nil { + return nil, err } } - - if query := phq.withProvisionedHostToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedHost) - for i := range nodes { - if nodes[i].provisioned_host_provisioned_host_to_build == nil { - continue - } - fk := *nodes[i].provisioned_host_provisioned_host_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + for name, query := range phq.withNamedProvisionedHostToProvisioningStep { + if err := phq.loadProvisionedHostToProvisioningStep(ctx, query, nodes, + func(n *ProvisionedHost) { n.appendNamedProvisionedHostToProvisioningStep(name) }, + func(n *ProvisionedHost, e *ProvisioningStep) { n.appendNamedProvisionedHostToProvisioningStep(name, e) }); err != nil { + return nil, err } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range phq.withNamedProvisionedHostToAgentStatus { + if err := phq.loadProvisionedHostToAgentStatus(ctx, query, nodes, + func(n *ProvisionedHost) { n.appendNamedProvisionedHostToAgentStatus(name) }, + func(n *ProvisionedHost, e *AgentStatus) { n.appendNamedProvisionedHostToAgentStatus(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedHostToBuild = n - } + } + for name, query := range phq.withNamedProvisionedHostToAgentTask { + if err := phq.loadProvisionedHostToAgentTask(ctx, query, nodes, + func(n *ProvisionedHost) { n.appendNamedProvisionedHostToAgentTask(name) }, + func(n *ProvisionedHost, e *AgentTask) { n.appendNamedProvisionedHostToAgentTask(name, e) }); err != nil { + return nil, err } } + for i := range phq.loadTotal { + if err := phq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := 
phq.withProvisionedHostToProvisioningStep; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisionedHost) +func (phq *ProvisionedHostQuery) loadProvisionedHostToStatus(ctx context.Context, query *StatusQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisionedHost) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisionedhost.ProvisionedHostToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.provisioned_host_provisioned_host_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "provisioned_host_provisioned_host_to_status" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioned_host_provisioned_host_to_status" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToProvisionedNetwork(ctx context.Context, query *ProvisionedNetworkQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *ProvisionedNetwork)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedHost) + for i := range nodes { + if nodes[i].provisioned_host_provisioned_host_to_provisioned_network == nil { + continue + } + fk := *nodes[i].provisioned_host_provisioned_host_to_provisioned_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(provisionednetwork.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_provisioned_network" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ProvisionedHostToProvisioningStep = []*ProvisioningStep{} - } - query.withFKs = true - query.Where(predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.InValues(provisionedhost.ProvisionedHostToProvisioningStepColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.provisioning_step_provisioning_step_to_provisioned_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioning_step_provisioning_step_to_provisioned_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_provisioned_host" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisionedHostToProvisioningStep = append(node.Edges.ProvisionedHostToProvisioningStep, n) + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToHost(ctx context.Context, query *HostQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *Host)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedHost) + for i := range nodes { + if 
nodes[i].provisioned_host_provisioned_host_to_host == nil { + continue + } + fk := *nodes[i].provisioned_host_provisioned_host_to_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := phq.withProvisionedHostToAgentStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisionedHost) + if len(ids) == 0 { + return nil + } + query.Where(host.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_host" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ProvisionedHostToAgentStatus = []*AgentStatus{} - } - query.withFKs = true - query.Where(predicate.AgentStatus(func(s *sql.Selector) { - s.Where(sql.InValues(provisionedhost.ProvisionedHostToAgentStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.agent_status_agent_status_to_provisioned_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "agent_status_agent_status_to_provisioned_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_status_agent_status_to_provisioned_host" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisionedHostToAgentStatus = append(node.Edges.ProvisionedHostToAgentStatus, n) + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToEndStepPlan(ctx context.Context, query *PlanQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedHost) + for i := range nodes { + if nodes[i].provisioned_host_provisioned_host_to_end_step_plan == nil { + continue + } + fk := *nodes[i].provisioned_host_provisioned_host_to_end_step_plan + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := phq.withProvisionedHostToAgentTask; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisionedHost) + if len(ids) == 0 { + return nil + } + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_end_step_plan" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ProvisionedHostToAgentTask = []*AgentTask{} - } - query.withFKs = true - query.Where(predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.InValues(provisionedhost.ProvisionedHostToAgentTaskColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(nodes[i], n) } - for _, n := range neighbors { - fk := n.agent_task_agent_task_to_provisioned_host - if fk == nil { - return nil, fmt.Errorf(`foreign-key "agent_task_agent_task_to_provisioned_host" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_task_agent_task_to_provisioned_host" returned %v for node %v`, *fk, 
n.ID) - } - node.Edges.ProvisionedHostToAgentTask = append(node.Edges.ProvisionedHostToAgentTask, n) + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToBuild(ctx context.Context, query *BuildQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedHost) + for i := range nodes { + if nodes[i].provisioned_host_provisioned_host_to_build == nil { + continue + } + fk := *nodes[i].provisioned_host_provisioned_host_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := phq.withProvisionedHostToPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedHost) + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_build" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].plan_plan_to_provisioned_host == nil { - continue - } - fk := *nodes[i].plan_plan_to_provisioned_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToProvisioningStep(ctx context.Context, query *ProvisioningStepQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *ProvisioningStep)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisionedHost) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioned_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedHostToPlan = n - } + } + query.withFKs = true + query.Where(predicate.ProvisioningStep(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisionedhost.ProvisionedHostToProvisioningStepColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.provisioning_step_provisioning_step_to_provisioned_host + if fk == nil { + return fmt.Errorf(`foreign-key "provisioning_step_provisioning_step_to_provisioned_host" is nil for node %v`, n.ID) } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioning_step_provisioning_step_to_provisioned_host" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - - if query := phq.withProvisionedHostToGinFileMiddleware; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedHost) + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToAgentStatus(ctx context.Context, query *AgentStatusQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *AgentStatus)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisionedHost) + for i := range nodes { + fks = append(fks, nodes[i].ID) + 
nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.AgentStatus(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisionedhost.ProvisionedHostToAgentStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.agent_status_agent_status_to_provisioned_host + if fk == nil { + return fmt.Errorf(`foreign-key "agent_status_agent_status_to_provisioned_host" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "agent_status_agent_status_to_provisioned_host" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToAgentTask(ctx context.Context, query *AgentTaskQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *AgentTask)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisionedHost) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.AgentTask(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisionedhost.ProvisionedHostToAgentTaskColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.agent_task_agent_task_to_provisioned_host + if fk == nil { + return fmt.Errorf(`foreign-key "agent_task_agent_task_to_provisioned_host" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "agent_task_agent_task_to_provisioned_host" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToPlan(ctx context.Context, query *PlanQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedHost) + for i := range nodes { + if nodes[i].plan_plan_to_provisioned_host == nil { + continue + } + fk := *nodes[i].plan_plan_to_provisioned_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioned_host" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].gin_file_middleware_gin_file_middleware_to_provisioned_host == nil { - continue - } - fk := *nodes[i].gin_file_middleware_gin_file_middleware_to_provisioned_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(ginfilemiddleware.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (phq *ProvisionedHostQuery) loadProvisionedHostToGinFileMiddleware(ctx context.Context, query *GinFileMiddlewareQuery, nodes []*ProvisionedHost, init func(*ProvisionedHost), assign func(*ProvisionedHost, *GinFileMiddleware)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedHost) + for i := range 
nodes { + if nodes[i].gin_file_middleware_gin_file_middleware_to_provisioned_host == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "gin_file_middleware_gin_file_middleware_to_provisioned_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedHostToGinFileMiddleware = n - } + fk := *nodes[i].gin_file_middleware_gin_file_middleware_to_provisioned_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - return nodes, nil + if len(ids) == 0 { + return nil + } + query.Where(ginfilemiddleware.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "gin_file_middleware_gin_file_middleware_to_provisioned_host" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (phq *ProvisionedHostQuery) sqlCount(ctx context.Context) (int, error) { _spec := phq.querySpec() - _spec.Node.Columns = phq.fields - if len(phq.fields) > 0 { - _spec.Unique = phq.unique != nil && *phq.unique + if len(phq.modifiers) > 0 { + _spec.Modifiers = phq.modifiers } - return sqlgraph.CountNodes(ctx, phq.driver, _spec) -} - -func (phq *ProvisionedHostQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := phq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = phq.ctx.Fields + if len(phq.ctx.Fields) > 0 { + _spec.Unique = phq.ctx.Unique != nil && *phq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, phq.driver, _spec) } func (phq *ProvisionedHostQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionedhost.Table, - Columns: provisionedhost.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, - }, - From: phq.sql, - Unique: true, - } - if unique := phq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(provisionedhost.Table, provisionedhost.Columns, sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID)) + _spec.From = phq.sql + if unique := phq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if phq.path != nil { + _spec.Unique = true } - if fields := phq.fields; len(fields) > 0 { + if fields := phq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, provisionedhost.FieldID) for i := range fields { @@ -1057,10 +1189,10 @@ func (phq *ProvisionedHostQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := phq.limit; limit != nil { + if limit := phq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := phq.offset; offset != nil { + if offset := phq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := phq.order; len(ps) > 0 { @@ -1076,7 +1208,7 @@ func (phq *ProvisionedHostQuery) querySpec() *sqlgraph.QuerySpec { func (phq *ProvisionedHostQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(phq.driver.Dialect()) t1 := builder.Table(provisionedhost.Table) - columns := phq.fields + columns := phq.ctx.Fields if len(columns) == 0 { columns = provisionedhost.Columns } @@ -1085,7 +1217,7 @@ func (phq *ProvisionedHostQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = phq.sql selector.Select(selector.Columns(columns...)...) 
} - if phq.unique != nil && *phq.unique { + if phq.ctx.Unique != nil && *phq.ctx.Unique { selector.Distinct() } for _, p := range phq.predicates { @@ -1094,498 +1226,142 @@ func (phq *ProvisionedHostQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range phq.order { p(selector) } - if offset := phq.offset; offset != nil { + if offset := phq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := phq.limit; limit != nil { + if limit := phq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// ProvisionedHostGroupBy is the group-by builder for ProvisionedHost entities. -type ProvisionedHostGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (phgb *ProvisionedHostGroupBy) Aggregate(fns ...AggregateFunc) *ProvisionedHostGroupBy { - phgb.fns = append(phgb.fns, fns...) - return phgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (phgb *ProvisionedHostGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := phgb.path(ctx) - if err != nil { - return err - } - phgb.sql = query - return phgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := phgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (phgb *ProvisionedHostGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(phgb.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := phgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) StringsX(ctx context.Context) []string { - v, err := phgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (phgb *ProvisionedHostGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = phgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) StringX(ctx context.Context) string { - v, err := phgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (phgb *ProvisionedHostGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(phgb.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := phgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) IntsX(ctx context.Context) []int { - v, err := phgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (phgb *ProvisionedHostGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = phgb.Ints(ctx); err != nil { - return +// WithNamedProvisionedHostToProvisioningStep tells the query-builder to eager-load the nodes that are connected to the "ProvisionedHostToProvisioningStep" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (phq *ProvisionedHostQuery) WithNamedProvisionedHostToProvisioningStep(name string, opts ...func(*ProvisioningStepQuery)) *ProvisionedHostQuery { + query := (&ProvisioningStepClient{config: phq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostGroupBy.Ints returned %d results when one was expected", len(v)) + if phq.withNamedProvisionedHostToProvisioningStep == nil { + phq.withNamedProvisionedHostToProvisioningStep = make(map[string]*ProvisioningStepQuery) } - return -} - -// IntX is like Int, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) IntX(ctx context.Context) int { - v, err := phgb.Int(ctx) - if err != nil { - panic(err) - } - return v + phq.withNamedProvisionedHostToProvisioningStep[name] = query + return phq } -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (phgb *ProvisionedHostGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(phgb.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := phgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedProvisionedHostToAgentStatus tells the query-builder to eager-load the nodes that are connected to the "ProvisionedHostToAgentStatus" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (phq *ProvisionedHostQuery) WithNamedProvisionedHostToAgentStatus(name string, opts ...func(*AgentStatusQuery)) *ProvisionedHostQuery { + query := (&AgentStatusClient{config: phq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := phgb.Float64s(ctx) - if err != nil { - panic(err) + if phq.withNamedProvisionedHostToAgentStatus == nil { + phq.withNamedProvisionedHostToAgentStatus = make(map[string]*AgentStatusQuery) } - return v + phq.withNamedProvisionedHostToAgentStatus[name] = query + return phq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (phgb *ProvisionedHostGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = phgb.Float64s(ctx); err != nil { - return +// WithNamedProvisionedHostToAgentTask tells the query-builder to eager-load the nodes that are connected to the "ProvisionedHostToAgentTask" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (phq *ProvisionedHostQuery) WithNamedProvisionedHostToAgentTask(name string, opts ...func(*AgentTaskQuery)) *ProvisionedHostQuery { + query := (&AgentTaskClient{config: phq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostGroupBy.Float64s returned %d results when one was expected", len(v)) + if phq.withNamedProvisionedHostToAgentTask == nil { + phq.withNamedProvisionedHostToAgentTask = make(map[string]*AgentTaskQuery) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) Float64X(ctx context.Context) float64 { - v, err := phgb.Float64(ctx) - if err != nil { - panic(err) - } - return v + phq.withNamedProvisionedHostToAgentTask[name] = query + return phq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (phgb *ProvisionedHostGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(phgb.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := phgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// ProvisionedHostGroupBy is the group-by builder for ProvisionedHost entities. +type ProvisionedHostGroupBy struct { + selector + build *ProvisionedHostQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (phgb *ProvisionedHostGroupBy) BoolsX(ctx context.Context) []bool { - v, err := phgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (phgb *ProvisionedHostGroupBy) Aggregate(fns ...AggregateFunc) *ProvisionedHostGroupBy { + phgb.fns = append(phgb.fns, fns...) + return phgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (phgb *ProvisionedHostGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = phgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (phgb *ProvisionedHostGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, phgb.build.ctx, "GroupBy") + if err := phgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*ProvisionedHostQuery, *ProvisionedHostGroupBy](ctx, phgb.build, phgb, phgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. 
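The WithNamed* builders above let the same edge be eager-loaded more than once under different names, each with its own query options. A minimal usage sketch, assuming the standard laforge ent client wiring; the Named* accessors used to read the results back are the ProvisionedHost counterparts of the ProvisionedNetwork accessor added later in this change:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/provisionedhost"
	"github.com/google/uuid"
)

// loadHostActivity eager-loads agent statuses and agent tasks for one host
// under explicit names, so the two result sets can be read back independently.
func loadHostActivity(ctx context.Context, client *ent.Client, id uuid.UUID) (*ent.ProvisionedHost, error) {
	return client.ProvisionedHost.Query().
		Where(provisionedhost.ID(id)).
		WithNamedProvisionedHostToAgentStatus("recent", func(q *ent.AgentStatusQuery) {
			q.Limit(10) // cap how many statuses are loaded under this name
		}).
		WithNamedProvisionedHostToAgentTask("all").
		Only(ctx)
}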
-func (phgb *ProvisionedHostGroupBy) BoolX(ctx context.Context) bool { - v, err := phgb.Bool(ctx) - if err != nil { - panic(err) +func (phgb *ProvisionedHostGroupBy) sqlScan(ctx context.Context, root *ProvisionedHostQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(phgb.fns)) + for _, fn := range phgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (phgb *ProvisionedHostGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range phgb.fields { - if !provisionedhost.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*phgb.flds)+len(phgb.fns)) + for _, f := range *phgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := phgb.sqlQuery() + selector.GroupBy(selector.Columns(*phgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := phgb.driver.Query(ctx, query, args, rows); err != nil { + if err := phgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (phgb *ProvisionedHostGroupBy) sqlQuery() *sql.Selector { - selector := phgb.sql.Select() - aggregation := make([]string, 0, len(phgb.fns)) - for _, fn := range phgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(phgb.fields)+len(phgb.fns)) - for _, f := range phgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(phgb.fields...)...) -} - // ProvisionedHostSelect is the builder for selecting fields of ProvisionedHost entities. type ProvisionedHostSelect struct { *ProvisionedHostQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (phs *ProvisionedHostSelect) Aggregate(fns ...AggregateFunc) *ProvisionedHostSelect { + phs.fns = append(phs.fns, fns...) + return phs } // Scan applies the selector query and scans the result into the given value. -func (phs *ProvisionedHostSelect) Scan(ctx context.Context, v interface{}) error { +func (phs *ProvisionedHostSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, phs.ctx, "Select") if err := phs.prepareQuery(ctx); err != nil { return err } - phs.sql = phs.ProvisionedHostQuery.sqlQuery(ctx) - return phs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (phs *ProvisionedHostSelect) ScanX(ctx context.Context, v interface{}) { - if err := phs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. 
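The group-by builder now routes Scan through scanWithInterceptors, but the public surface is unchanged. A short sketch of the aggregate flow it supports, using the stock ent.Count helper; the client variable is assumed:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/provisionedhost"
)

// hostsPerAddonType counts provisioned hosts per addon_type value.
func hostsPerAddonType(ctx context.Context, client *ent.Client) error {
	var rows []struct {
		AddonType string `json:"addon_type"`
		Count     int    `json:"count"`
	}
	// rows ends up holding one (addon_type, count) pair per distinct value.
	return client.ProvisionedHost.Query().
		GroupBy(provisionedhost.FieldAddonType).
		Aggregate(ent.Count()).
		Scan(ctx, &rows)
}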
-func (phs *ProvisionedHostSelect) Strings(ctx context.Context) ([]string, error) { - if len(phs.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := phs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (phs *ProvisionedHostSelect) StringsX(ctx context.Context) []string { - v, err := phs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (phs *ProvisionedHostSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = phs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (phs *ProvisionedHostSelect) StringX(ctx context.Context) string { - v, err := phs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (phs *ProvisionedHostSelect) Ints(ctx context.Context) ([]int, error) { - if len(phs.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := phs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (phs *ProvisionedHostSelect) IntsX(ctx context.Context) []int { - v, err := phs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (phs *ProvisionedHostSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = phs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (phs *ProvisionedHostSelect) IntX(ctx context.Context) int { - v, err := phs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (phs *ProvisionedHostSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(phs.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := phs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (phs *ProvisionedHostSelect) Float64sX(ctx context.Context) []float64 { - v, err := phs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (phs *ProvisionedHostSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = phs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (phs *ProvisionedHostSelect) Float64X(ctx context.Context) float64 { - v, err := phs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (phs *ProvisionedHostSelect) Bools(ctx context.Context) ([]bool, error) { - if len(phs.fields) > 1 { - return nil, errors.New("ent: ProvisionedHostSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := phs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (phs *ProvisionedHostSelect) BoolsX(ctx context.Context) []bool { - v, err := phs.Bools(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*ProvisionedHostQuery, *ProvisionedHostSelect](ctx, phs.ProvisionedHostQuery, phs, phs.inters, v) } -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (phs *ProvisionedHostSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = phs.Bools(ctx); err != nil { - return +func (phs *ProvisionedHostSelect) sqlScan(ctx context.Context, root *ProvisionedHostQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(phs.fns)) + for _, fn := range phs.fns { + aggregation = append(aggregation, fn(selector)) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionedhost.Label} - default: - err = fmt.Errorf("ent: ProvisionedHostSelect.Bools returned %d results when one was expected", len(v)) + switch n := len(*phs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (phs *ProvisionedHostSelect) BoolX(ctx context.Context) bool { - v, err := phs.Bool(ctx) - if err != nil { - panic(err) - } - return v -} - -func (phs *ProvisionedHostSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := phs.sql.Query() + query, args := selector.Query() if err := phs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/provisionedhost_update.go b/ent/provisionedhost_update.go index c9f112f3..76cd3b8a 100755 --- a/ent/provisionedhost_update.go +++ b/ent/provisionedhost_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -43,6 +43,14 @@ func (phu *ProvisionedHostUpdate) SetSubnetIP(s string) *ProvisionedHostUpdate { return phu } +// SetNillableSubnetIP sets the "subnet_ip" field if the given value is not nil. +func (phu *ProvisionedHostUpdate) SetNillableSubnetIP(s *string) *ProvisionedHostUpdate { + if s != nil { + phu.SetSubnetIP(*s) + } + return phu +} + // SetAddonType sets the "addon_type" field. 
func (phu *ProvisionedHostUpdate) SetAddonType(pt provisionedhost.AddonType) *ProvisionedHostUpdate { phu.mutation.SetAddonType(pt) @@ -327,40 +335,7 @@ func (phu *ProvisionedHostUpdate) ClearProvisionedHostToGinFileMiddleware() *Pro // Save executes the query and returns the number of nodes affected by the update operation. func (phu *ProvisionedHostUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(phu.hooks) == 0 { - if err = phu.check(); err != nil { - return 0, err - } - affected, err = phu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedHostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = phu.check(); err != nil { - return 0, err - } - phu.mutation = mutation - affected, err = phu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(phu.hooks) - 1; i >= 0; i-- { - if phu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = phu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, phu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, phu.sqlSave, phu.mutation, phu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -408,16 +383,10 @@ func (phu *ProvisionedHostUpdate) check() error { } func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionedhost.Table, - Columns: provisionedhost.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, - }, + if err := phu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(provisionedhost.Table, provisionedhost.Columns, sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID)) if ps := phu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -426,31 +395,16 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error } } if value, ok := phu.mutation.SubnetIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionedhost.FieldSubnetIP, - }) + _spec.SetField(provisionedhost.FieldSubnetIP, field.TypeString, value) } if value, ok := phu.mutation.AddonType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: provisionedhost.FieldAddonType, - }) + _spec.SetField(provisionedhost.FieldAddonType, field.TypeEnum, value) } if phu.mutation.AddonTypeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Column: provisionedhost.FieldAddonType, - }) + _spec.ClearField(provisionedhost.FieldAddonType, field.TypeEnum) } if value, ok := phu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: provisionedhost.FieldVars, - }) + _spec.SetField(provisionedhost.FieldVars, field.TypeJSON, value) } if phu.mutation.ProvisionedHostToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -460,10 +414,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - 
Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -476,10 +427,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -495,10 +443,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -511,10 +456,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -530,10 +472,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -546,10 +485,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -565,10 +501,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToEndStepPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -581,10 +514,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToEndStepPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -600,10 +530,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -616,10 +543,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx 
context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -635,10 +559,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -651,10 +572,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -670,10 +588,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -689,10 +604,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -705,10 +617,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -724,10 +633,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -743,10 +649,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -759,10 +662,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -778,10 +678,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -797,10 +694,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -813,10 +707,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -832,10 +723,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -848,10 +736,7 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error Columns: []string{provisionedhost.ProvisionedHostToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -863,10 +748,11 @@ func (phu *ProvisionedHostUpdate) sqlSave(ctx context.Context) (n int, err error if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{provisionedhost.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + phu.mutation.done = true return n, nil } @@ -884,6 +770,14 @@ func (phuo *ProvisionedHostUpdateOne) SetSubnetIP(s string) *ProvisionedHostUpda return phuo } +// SetNillableSubnetIP sets the "subnet_ip" field if the given value is not nil. +func (phuo *ProvisionedHostUpdateOne) SetNillableSubnetIP(s *string) *ProvisionedHostUpdateOne { + if s != nil { + phuo.SetSubnetIP(*s) + } + return phuo +} + // SetAddonType sets the "addon_type" field. func (phuo *ProvisionedHostUpdateOne) SetAddonType(pt provisionedhost.AddonType) *ProvisionedHostUpdateOne { phuo.mutation.SetAddonType(pt) @@ -1166,6 +1060,12 @@ func (phuo *ProvisionedHostUpdateOne) ClearProvisionedHostToGinFileMiddleware() return phuo } +// Where appends a list predicates to the ProvisionedHostUpdate builder. +func (phuo *ProvisionedHostUpdateOne) Where(ps ...predicate.ProvisionedHost) *ProvisionedHostUpdateOne { + phuo.mutation.Where(ps...) 
+ return phuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (phuo *ProvisionedHostUpdateOne) Select(field string, fields ...string) *ProvisionedHostUpdateOne { @@ -1175,40 +1075,7 @@ func (phuo *ProvisionedHostUpdateOne) Select(field string, fields ...string) *Pr // Save executes the query and returns the updated ProvisionedHost entity. func (phuo *ProvisionedHostUpdateOne) Save(ctx context.Context) (*ProvisionedHost, error) { - var ( - err error - node *ProvisionedHost - ) - if len(phuo.hooks) == 0 { - if err = phuo.check(); err != nil { - return nil, err - } - node, err = phuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedHostMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = phuo.check(); err != nil { - return nil, err - } - phuo.mutation = mutation - node, err = phuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(phuo.hooks) - 1; i >= 0; i-- { - if phuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = phuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, phuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, phuo.sqlSave, phuo.mutation, phuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -1256,16 +1123,10 @@ func (phuo *ProvisionedHostUpdateOne) check() error { } func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *ProvisionedHost, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionedhost.Table, - Columns: provisionedhost.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, - }, + if err := phuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(provisionedhost.Table, provisionedhost.Columns, sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID)) id, ok := phuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ProvisionedHost.id" for update`)} @@ -1291,31 +1152,16 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi } } if value, ok := phuo.mutation.SubnetIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionedhost.FieldSubnetIP, - }) + _spec.SetField(provisionedhost.FieldSubnetIP, field.TypeString, value) } if value, ok := phuo.mutation.AddonType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: provisionedhost.FieldAddonType, - }) + _spec.SetField(provisionedhost.FieldAddonType, field.TypeEnum, value) } if phuo.mutation.AddonTypeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Column: provisionedhost.FieldAddonType, - }) + _spec.ClearField(provisionedhost.FieldAddonType, field.TypeEnum) } if value, ok := phuo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: provisionedhost.FieldVars, - }) + _spec.SetField(provisionedhost.FieldVars, field.TypeJSON, value) } if phuo.mutation.ProvisionedHostToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1325,10 +1171,7 @@ 
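Taken together, the new SetNillableSubnetIP setter and the Where method on the update-one builder allow a guarded, optional update in one round trip. A minimal sketch; the ID, predicate value, and helper name are placeholders:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/provisionedhost"
	"github.com/google/uuid"
)

// maybeRetargetHost updates the host only if it still sits on the expected
// subnet, and only writes subnet_ip when newIP is non-nil.
func maybeRetargetHost(ctx context.Context, client *ent.Client, id uuid.UUID, newIP *string) (*ent.ProvisionedHost, error) {
	return client.ProvisionedHost.UpdateOneID(id).
		Where(provisionedhost.SubnetIPEQ("10.0.0.0/24")). // placeholder expected value
		SetNillableSubnetIP(newIP).
		Save(ctx)
}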
func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1341,10 +1184,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1360,10 +1200,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1376,10 +1213,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1395,10 +1229,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1411,10 +1242,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: host.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(host.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1430,10 +1258,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToEndStepPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1446,10 +1271,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToEndStepPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1465,10 +1287,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1481,10 +1300,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1500,10 +1316,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1516,10 +1329,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1535,10 +1345,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1554,10 +1361,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1570,10 +1374,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1589,10 +1390,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToAgentStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agentstatus.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agentstatus.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1608,10 +1406,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ 
-1624,10 +1419,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1643,10 +1435,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1662,10 +1451,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1678,10 +1464,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1697,10 +1480,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1713,10 +1493,7 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi Columns: []string{provisionedhost.ProvisionedHostToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1731,9 +1508,10 @@ func (phuo *ProvisionedHostUpdateOne) sqlSave(ctx context.Context) (_node *Provi if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{provisionedhost.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + phuo.mutation.done = true return _node, nil } diff --git a/ent/provisionednetwork.go b/ent/provisionednetwork.go index c845a6ec..25394eb0 100755 --- a/ent/provisionednetwork.go +++ b/ent/provisionednetwork.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/build" "github.com/gen0cide/laforge/ent/network" @@ -32,6 +33,7 @@ type ProvisionedNetwork struct { // The values are being populated by the ProvisionedNetworkQuery when eager-loading is set. 
Edges ProvisionedNetworkEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // ProvisionedNetworkToStatus holds the value of the ProvisionedNetworkToStatus edge. HCLProvisionedNetworkToStatus *Status `json:"ProvisionedNetworkToStatus,omitempty"` @@ -45,11 +47,12 @@ type ProvisionedNetwork struct { HCLProvisionedNetworkToProvisionedHost []*ProvisionedHost `json:"ProvisionedNetworkToProvisionedHost,omitempty"` // ProvisionedNetworkToPlan holds the value of the ProvisionedNetworkToPlan edge. HCLProvisionedNetworkToPlan *Plan `json:"ProvisionedNetworkToPlan,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ plan_plan_to_provisioned_network *uuid.UUID provisioned_network_provisioned_network_to_network *uuid.UUID provisioned_network_provisioned_network_to_build *uuid.UUID provisioned_network_provisioned_network_to_team *uuid.UUID + selectValues sql.SelectValues } // ProvisionedNetworkEdges holds the relations/edges for other nodes in the graph. @@ -69,6 +72,10 @@ type ProvisionedNetworkEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [6]bool + // totalCount holds the count of the edges above. + totalCount [6]map[string]int + + namedProvisionedNetworkToProvisionedHost map[string][]*ProvisionedHost } // ProvisionedNetworkToStatusOrErr returns the ProvisionedNetworkToStatus value or an error if the edge @@ -76,8 +83,7 @@ type ProvisionedNetworkEdges struct { func (e ProvisionedNetworkEdges) ProvisionedNetworkToStatusOrErr() (*Status, error) { if e.loadedTypes[0] { if e.ProvisionedNetworkToStatus == nil { - // The edge ProvisionedNetworkToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.ProvisionedNetworkToStatus, nil @@ -90,8 +96,7 @@ func (e ProvisionedNetworkEdges) ProvisionedNetworkToStatusOrErr() (*Status, err func (e ProvisionedNetworkEdges) ProvisionedNetworkToNetworkOrErr() (*Network, error) { if e.loadedTypes[1] { if e.ProvisionedNetworkToNetwork == nil { - // The edge ProvisionedNetworkToNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: network.Label} } return e.ProvisionedNetworkToNetwork, nil @@ -104,8 +109,7 @@ func (e ProvisionedNetworkEdges) ProvisionedNetworkToNetworkOrErr() (*Network, e func (e ProvisionedNetworkEdges) ProvisionedNetworkToBuildOrErr() (*Build, error) { if e.loadedTypes[2] { if e.ProvisionedNetworkToBuild == nil { - // The edge ProvisionedNetworkToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.ProvisionedNetworkToBuild, nil @@ -118,8 +122,7 @@ func (e ProvisionedNetworkEdges) ProvisionedNetworkToBuildOrErr() (*Build, error func (e ProvisionedNetworkEdges) ProvisionedNetworkToTeamOrErr() (*Team, error) { if e.loadedTypes[3] { if e.ProvisionedNetworkToTeam == nil { - // The edge ProvisionedNetworkToTeam was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: team.Label} } return e.ProvisionedNetworkToTeam, nil @@ -141,8 +144,7 @@ func (e ProvisionedNetworkEdges) ProvisionedNetworkToProvisionedHostOrErr() ([]* func (e ProvisionedNetworkEdges) ProvisionedNetworkToPlanOrErr() (*Plan, error) { if e.loadedTypes[5] { if e.ProvisionedNetworkToPlan == nil { - // The edge ProvisionedNetworkToPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.ProvisionedNetworkToPlan, nil @@ -151,8 +153,8 @@ func (e ProvisionedNetworkEdges) ProvisionedNetworkToPlanOrErr() (*Plan, error) } // scanValues returns the types for scanning values from sql.Rows. -func (*ProvisionedNetwork) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*ProvisionedNetwork) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case provisionednetwork.FieldVars: @@ -170,7 +172,7 @@ func (*ProvisionedNetwork) scanValues(columns []string) ([]interface{}, error) { case provisionednetwork.ForeignKeys[3]: // provisioned_network_provisioned_network_to_team values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type ProvisionedNetwork", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -178,7 +180,7 @@ func (*ProvisionedNetwork) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the ProvisionedNetwork fields. -func (pn *ProvisionedNetwork) assignValues(columns []string, values []interface{}) error { +func (pn *ProvisionedNetwork) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -238,56 +240,64 @@ func (pn *ProvisionedNetwork) assignValues(columns []string, values []interface{ pn.provisioned_network_provisioned_network_to_team = new(uuid.UUID) *pn.provisioned_network_provisioned_network_to_team = *value.S.(*uuid.UUID) } + default: + pn.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the ProvisionedNetwork. +// This includes values selected through modifiers, order, etc. +func (pn *ProvisionedNetwork) Value(name string) (ent.Value, error) { + return pn.selectValues.Get(name) +} + // QueryProvisionedNetworkToStatus queries the "ProvisionedNetworkToStatus" edge of the ProvisionedNetwork entity. func (pn *ProvisionedNetwork) QueryProvisionedNetworkToStatus() *StatusQuery { - return (&ProvisionedNetworkClient{config: pn.config}).QueryProvisionedNetworkToStatus(pn) + return NewProvisionedNetworkClient(pn.config).QueryProvisionedNetworkToStatus(pn) } // QueryProvisionedNetworkToNetwork queries the "ProvisionedNetworkToNetwork" edge of the ProvisionedNetwork entity. func (pn *ProvisionedNetwork) QueryProvisionedNetworkToNetwork() *NetworkQuery { - return (&ProvisionedNetworkClient{config: pn.config}).QueryProvisionedNetworkToNetwork(pn) + return NewProvisionedNetworkClient(pn.config).QueryProvisionedNetworkToNetwork(pn) } // QueryProvisionedNetworkToBuild queries the "ProvisionedNetworkToBuild" edge of the ProvisionedNetwork entity. 
func (pn *ProvisionedNetwork) QueryProvisionedNetworkToBuild() *BuildQuery { - return (&ProvisionedNetworkClient{config: pn.config}).QueryProvisionedNetworkToBuild(pn) + return NewProvisionedNetworkClient(pn.config).QueryProvisionedNetworkToBuild(pn) } // QueryProvisionedNetworkToTeam queries the "ProvisionedNetworkToTeam" edge of the ProvisionedNetwork entity. func (pn *ProvisionedNetwork) QueryProvisionedNetworkToTeam() *TeamQuery { - return (&ProvisionedNetworkClient{config: pn.config}).QueryProvisionedNetworkToTeam(pn) + return NewProvisionedNetworkClient(pn.config).QueryProvisionedNetworkToTeam(pn) } // QueryProvisionedNetworkToProvisionedHost queries the "ProvisionedNetworkToProvisionedHost" edge of the ProvisionedNetwork entity. func (pn *ProvisionedNetwork) QueryProvisionedNetworkToProvisionedHost() *ProvisionedHostQuery { - return (&ProvisionedNetworkClient{config: pn.config}).QueryProvisionedNetworkToProvisionedHost(pn) + return NewProvisionedNetworkClient(pn.config).QueryProvisionedNetworkToProvisionedHost(pn) } // QueryProvisionedNetworkToPlan queries the "ProvisionedNetworkToPlan" edge of the ProvisionedNetwork entity. func (pn *ProvisionedNetwork) QueryProvisionedNetworkToPlan() *PlanQuery { - return (&ProvisionedNetworkClient{config: pn.config}).QueryProvisionedNetworkToPlan(pn) + return NewProvisionedNetworkClient(pn.config).QueryProvisionedNetworkToPlan(pn) } // Update returns a builder for updating this ProvisionedNetwork. // Note that you need to call ProvisionedNetwork.Unwrap() before calling this method if this ProvisionedNetwork // was returned from a transaction, and the transaction was committed or rolled back. func (pn *ProvisionedNetwork) Update() *ProvisionedNetworkUpdateOne { - return (&ProvisionedNetworkClient{config: pn.config}).UpdateOne(pn) + return NewProvisionedNetworkClient(pn.config).UpdateOne(pn) } // Unwrap unwraps the ProvisionedNetwork entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (pn *ProvisionedNetwork) Unwrap() *ProvisionedNetwork { - tx, ok := pn.config.driver.(*txDriver) + _tx, ok := pn.config.driver.(*txDriver) if !ok { panic("ent: ProvisionedNetwork is not a transactional entity") } - pn.config.driver = tx.drv + pn.config.driver = _tx.drv return pn } @@ -295,22 +305,42 @@ func (pn *ProvisionedNetwork) Unwrap() *ProvisionedNetwork { func (pn *ProvisionedNetwork) String() string { var builder strings.Builder builder.WriteString("ProvisionedNetwork(") - builder.WriteString(fmt.Sprintf("id=%v", pn.ID)) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", pn.ID)) + builder.WriteString("name=") builder.WriteString(pn.Name) - builder.WriteString(", cidr=") + builder.WriteString(", ") + builder.WriteString("cidr=") builder.WriteString(pn.Cidr) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", pn.Vars)) builder.WriteByte(')') return builder.String() } -// ProvisionedNetworks is a parsable slice of ProvisionedNetwork. -type ProvisionedNetworks []*ProvisionedNetwork +// NamedProvisionedNetworkToProvisionedHost returns the ProvisionedNetworkToProvisionedHost named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (pn *ProvisionedNetwork) NamedProvisionedNetworkToProvisionedHost(name string) ([]*ProvisionedHost, error) { + if pn.Edges.namedProvisionedNetworkToProvisionedHost == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := pn.Edges.namedProvisionedNetworkToProvisionedHost[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (pn ProvisionedNetworks) config(cfg config) { - for _i := range pn { - pn[_i].config = cfg +func (pn *ProvisionedNetwork) appendNamedProvisionedNetworkToProvisionedHost(name string, edges ...*ProvisionedHost) { + if pn.Edges.namedProvisionedNetworkToProvisionedHost == nil { + pn.Edges.namedProvisionedNetworkToProvisionedHost = make(map[string][]*ProvisionedHost) + } + if len(edges) == 0 { + pn.Edges.namedProvisionedNetworkToProvisionedHost[name] = []*ProvisionedHost{} + } else { + pn.Edges.namedProvisionedNetworkToProvisionedHost[name] = append(pn.Edges.namedProvisionedNetworkToProvisionedHost[name], edges...) } } + +// ProvisionedNetworks is a parsable slice of ProvisionedNetwork. +type ProvisionedNetworks []*ProvisionedNetwork diff --git a/ent/provisionednetwork/provisionednetwork.go b/ent/provisionednetwork/provisionednetwork.go index 4e158d81..e0862358 100755 --- a/ent/provisionednetwork/provisionednetwork.go +++ b/ent/provisionednetwork/provisionednetwork.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package provisionednetwork import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -111,3 +113,112 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the ProvisionedNetwork queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCidr orders the results by the cidr field. +func ByCidr(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCidr, opts...).ToFunc() +} + +// ByProvisionedNetworkToStatusField orders the results by ProvisionedNetworkToStatus field. +func ByProvisionedNetworkToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedNetworkToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedNetworkToNetworkField orders the results by ProvisionedNetworkToNetwork field. +func ByProvisionedNetworkToNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedNetworkToNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedNetworkToBuildField orders the results by ProvisionedNetworkToBuild field. +func ByProvisionedNetworkToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedNetworkToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedNetworkToTeamField orders the results by ProvisionedNetworkToTeam field. 
+func ByProvisionedNetworkToTeamField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedNetworkToTeamStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisionedNetworkToProvisionedHostCount orders the results by ProvisionedNetworkToProvisionedHost count. +func ByProvisionedNetworkToProvisionedHostCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newProvisionedNetworkToProvisionedHostStep(), opts...) + } +} + +// ByProvisionedNetworkToProvisionedHost orders the results by ProvisionedNetworkToProvisionedHost terms. +func ByProvisionedNetworkToProvisionedHost(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedNetworkToProvisionedHostStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByProvisionedNetworkToPlanField orders the results by ProvisionedNetworkToPlan field. +func ByProvisionedNetworkToPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisionedNetworkToPlanStep(), sql.OrderByField(field, opts...)) + } +} +func newProvisionedNetworkToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedNetworkToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, ProvisionedNetworkToStatusTable, ProvisionedNetworkToStatusColumn), + ) +} +func newProvisionedNetworkToNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedNetworkToNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToNetworkTable, ProvisionedNetworkToNetworkColumn), + ) +} +func newProvisionedNetworkToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedNetworkToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToBuildTable, ProvisionedNetworkToBuildColumn), + ) +} +func newProvisionedNetworkToTeamStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedNetworkToTeamInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToTeamTable, ProvisionedNetworkToTeamColumn), + ) +} +func newProvisionedNetworkToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedNetworkToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedNetworkToProvisionedHostTable, ProvisionedNetworkToProvisionedHostColumn), + ) +} +func newProvisionedNetworkToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisionedNetworkToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedNetworkToPlanTable, ProvisionedNetworkToPlanColumn), + ) +} diff --git a/ent/provisionednetwork/where.go b/ent/provisionednetwork/where.go index ee2fe01b..88aa6858 100755 --- a/ent/provisionednetwork/where.go +++ b/ent/provisionednetwork/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package provisionednetwork @@ -11,321 +11,187 @@ import ( // ID filters vertices based on their ID field. 
func ID(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.ProvisionedNetwork(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.ProvisionedNetwork(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.ProvisionedNetwork(sql.FieldLTE(FieldID, id)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldEQ(FieldName, v)) } // Cidr applies equality check predicate on the "cidr" field. It's identical to CidrEQ. 
func Cidr(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldEQ(FieldCidr, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.ProvisionedNetwork { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.ProvisionedNetwork(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.ProvisionedNetwork { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.ProvisionedNetwork(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. 
func NameHasPrefix(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldContainsFold(FieldName, v)) } // CidrEQ applies the EQ predicate on the "cidr" field. func CidrEQ(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldEQ(FieldCidr, v)) } // CidrNEQ applies the NEQ predicate on the "cidr" field. func CidrNEQ(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldNEQ(FieldCidr, v)) } // CidrIn applies the In predicate on the "cidr" field. func CidrIn(vs ...string) predicate.ProvisionedNetwork { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCidr), v...)) - }) + return predicate.ProvisionedNetwork(sql.FieldIn(FieldCidr, vs...)) } // CidrNotIn applies the NotIn predicate on the "cidr" field. func CidrNotIn(vs ...string) predicate.ProvisionedNetwork { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCidr), v...)) - }) + return predicate.ProvisionedNetwork(sql.FieldNotIn(FieldCidr, vs...)) } // CidrGT applies the GT predicate on the "cidr" field. func CidrGT(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldGT(FieldCidr, v)) } // CidrGTE applies the GTE predicate on the "cidr" field. func CidrGTE(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldGTE(FieldCidr, v)) } // CidrLT applies the LT predicate on the "cidr" field. 
func CidrLT(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldLT(FieldCidr, v)) } // CidrLTE applies the LTE predicate on the "cidr" field. func CidrLTE(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldLTE(FieldCidr, v)) } // CidrContains applies the Contains predicate on the "cidr" field. func CidrContains(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldContains(FieldCidr, v)) } // CidrHasPrefix applies the HasPrefix predicate on the "cidr" field. func CidrHasPrefix(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldHasPrefix(FieldCidr, v)) } // CidrHasSuffix applies the HasSuffix predicate on the "cidr" field. func CidrHasSuffix(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldHasSuffix(FieldCidr, v)) } // CidrEqualFold applies the EqualFold predicate on the "cidr" field. func CidrEqualFold(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldEqualFold(FieldCidr, v)) } // CidrContainsFold applies the ContainsFold predicate on the "cidr" field. func CidrContainsFold(v string) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldCidr), v)) - }) + return predicate.ProvisionedNetwork(sql.FieldContainsFold(FieldCidr, v)) } // HasProvisionedNetworkToStatus applies the HasEdge predicate on the "ProvisionedNetworkToStatus" edge. @@ -333,7 +199,6 @@ func HasProvisionedNetworkToStatus() predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, ProvisionedNetworkToStatusTable, ProvisionedNetworkToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -343,11 +208,7 @@ func HasProvisionedNetworkToStatus() predicate.ProvisionedNetwork { // HasProvisionedNetworkToStatusWith applies the HasEdge predicate on the "ProvisionedNetworkToStatus" edge with a given conditions (other predicates). 
func HasProvisionedNetworkToStatusWith(preds ...predicate.Status) predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, ProvisionedNetworkToStatusTable, ProvisionedNetworkToStatusColumn), - ) + step := newProvisionedNetworkToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -361,7 +222,6 @@ func HasProvisionedNetworkToNetwork() predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToNetworkTable, ProvisionedNetworkToNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -371,11 +231,7 @@ func HasProvisionedNetworkToNetwork() predicate.ProvisionedNetwork { // HasProvisionedNetworkToNetworkWith applies the HasEdge predicate on the "ProvisionedNetworkToNetwork" edge with a given conditions (other predicates). func HasProvisionedNetworkToNetworkWith(preds ...predicate.Network) predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToNetworkTable, ProvisionedNetworkToNetworkColumn), - ) + step := newProvisionedNetworkToNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -389,7 +245,6 @@ func HasProvisionedNetworkToBuild() predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToBuildTable, ProvisionedNetworkToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -399,11 +254,7 @@ func HasProvisionedNetworkToBuild() predicate.ProvisionedNetwork { // HasProvisionedNetworkToBuildWith applies the HasEdge predicate on the "ProvisionedNetworkToBuild" edge with a given conditions (other predicates). func HasProvisionedNetworkToBuildWith(preds ...predicate.Build) predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToBuildTable, ProvisionedNetworkToBuildColumn), - ) + step := newProvisionedNetworkToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -417,7 +268,6 @@ func HasProvisionedNetworkToTeam() predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToTeamTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToTeamTable, ProvisionedNetworkToTeamColumn), ) sqlgraph.HasNeighbors(s, step) @@ -427,11 +277,7 @@ func HasProvisionedNetworkToTeam() predicate.ProvisionedNetwork { // HasProvisionedNetworkToTeamWith applies the HasEdge predicate on the "ProvisionedNetworkToTeam" edge with a given conditions (other predicates). 
func HasProvisionedNetworkToTeamWith(preds ...predicate.Team) predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToTeamInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisionedNetworkToTeamTable, ProvisionedNetworkToTeamColumn), - ) + step := newProvisionedNetworkToTeamStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -445,7 +291,6 @@ func HasProvisionedNetworkToProvisionedHost() predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedNetworkToProvisionedHostTable, ProvisionedNetworkToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -455,11 +300,7 @@ func HasProvisionedNetworkToProvisionedHost() predicate.ProvisionedNetwork { // HasProvisionedNetworkToProvisionedHostWith applies the HasEdge predicate on the "ProvisionedNetworkToProvisionedHost" edge with a given conditions (other predicates). func HasProvisionedNetworkToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, ProvisionedNetworkToProvisionedHostTable, ProvisionedNetworkToProvisionedHostColumn), - ) + step := newProvisionedNetworkToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -473,7 +314,6 @@ func HasProvisionedNetworkToPlan() predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedNetworkToPlanTable, ProvisionedNetworkToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -483,11 +323,7 @@ func HasProvisionedNetworkToPlan() predicate.ProvisionedNetwork { // HasProvisionedNetworkToPlanWith applies the HasEdge predicate on the "ProvisionedNetworkToPlan" edge with a given conditions (other predicates). func HasProvisionedNetworkToPlanWith(preds ...predicate.Plan) predicate.ProvisionedNetwork { return predicate.ProvisionedNetwork(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisionedNetworkToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, ProvisionedNetworkToPlanTable, ProvisionedNetworkToPlanColumn), - ) + step := newProvisionedNetworkToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -498,32 +334,15 @@ func HasProvisionedNetworkToPlanWith(preds ...predicate.Plan) predicate.Provisio // And groups predicates with the AND operator between them. func And(predicates ...predicate.ProvisionedNetwork) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ProvisionedNetwork(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.ProvisionedNetwork) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ProvisionedNetwork(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.ProvisionedNetwork) predicate.ProvisionedNetwork { - return predicate.ProvisionedNetwork(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ProvisionedNetwork(sql.NotPredicates(p)) } diff --git a/ent/provisionednetwork_create.go b/ent/provisionednetwork_create.go index f8b2ee97..7b23b629 100755 --- a/ent/provisionednetwork_create.go +++ b/ent/provisionednetwork_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -175,44 +175,8 @@ func (pnc *ProvisionedNetworkCreate) Mutation() *ProvisionedNetworkMutation { // Save creates the ProvisionedNetwork in the database. func (pnc *ProvisionedNetworkCreate) Save(ctx context.Context) (*ProvisionedNetwork, error) { - var ( - err error - node *ProvisionedNetwork - ) pnc.defaults() - if len(pnc.hooks) == 0 { - if err = pnc.check(); err != nil { - return nil, err - } - node, err = pnc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = pnc.check(); err != nil { - return nil, err - } - pnc.mutation = mutation - if node, err = pnc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(pnc.hooks) - 1; i >= 0; i-- { - if pnc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pnc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pnc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, pnc.sqlSave, pnc.mutation, pnc.hooks) } // SaveX calls Save and panics if Save returns an error. 
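The where.go hunks above replace the hand-rolled selector closures with ent's sql.Field* helpers (and sql.And/Or/NotPredicates for composition) without changing the exported predicate signatures, so existing call sites should keep compiling. A minimal caller-side sketch under that assumption; the *ent.Client value client, the ctx variable, and the listTeamNetworks helper are illustrative names, not part of this patch:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/provisionednetwork"
)

// listTeamNetworks is a hypothetical helper showing the regenerated
// predicate and ordering API from a caller's point of view.
func listTeamNetworks(ctx context.Context, client *ent.Client) ([]*ent.ProvisionedNetwork, error) {
	// Predicates compose exactly as before; only their implementation changed.
	return client.ProvisionedNetwork.Query().
		Where(
			provisionednetwork.Or(
				provisionednetwork.NameContains("corp"),
				provisionednetwork.CidrHasPrefix("10."),
			),
			provisionednetwork.HasProvisionedNetworkToTeam(),
		).
		// ByName is one of the OrderOption helpers added in provisionednetwork.go above.
		Order(provisionednetwork.ByName()).
		All(ctx)
}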
@@ -260,10 +224,13 @@ func (pnc *ProvisionedNetworkCreate) check() error { } func (pnc *ProvisionedNetworkCreate) sqlSave(ctx context.Context) (*ProvisionedNetwork, error) { + if err := pnc.check(); err != nil { + return nil, err + } _node, _spec := pnc.createSpec() if err := sqlgraph.CreateNode(ctx, pnc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -274,46 +241,30 @@ func (pnc *ProvisionedNetworkCreate) sqlSave(ctx context.Context) (*ProvisionedN return nil, err } } + pnc.mutation.id = &_node.ID + pnc.mutation.done = true return _node, nil } func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgraph.CreateSpec) { var ( _node = &ProvisionedNetwork{config: pnc.config} - _spec = &sqlgraph.CreateSpec{ - Table: provisionednetwork.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(provisionednetwork.Table, sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID)) ) if id, ok := pnc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := pnc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionednetwork.FieldName, - }) + _spec.SetField(provisionednetwork.FieldName, field.TypeString, value) _node.Name = value } if value, ok := pnc.mutation.Cidr(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionednetwork.FieldCidr, - }) + _spec.SetField(provisionednetwork.FieldCidr, field.TypeString, value) _node.Cidr = value } if value, ok := pnc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: provisionednetwork.FieldVars, - }) + _spec.SetField(provisionednetwork.FieldVars, field.TypeJSON, value) _node.Vars = value } if nodes := pnc.mutation.ProvisionedNetworkToStatusIDs(); len(nodes) > 0 { @@ -324,10 +275,7 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap Columns: []string{provisionednetwork.ProvisionedNetworkToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -343,10 +291,7 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap Columns: []string{provisionednetwork.ProvisionedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -363,10 +308,7 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap Columns: []string{provisionednetwork.ProvisionedNetworkToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -383,10 +325,7 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap Columns: []string{provisionednetwork.ProvisionedNetworkToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -403,10 +342,7 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -422,10 +358,7 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap Columns: []string{provisionednetwork.ProvisionedNetworkToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -440,11 +373,15 @@ func (pnc *ProvisionedNetworkCreate) createSpec() (*ProvisionedNetwork, *sqlgrap // ProvisionedNetworkCreateBulk is the builder for creating many ProvisionedNetwork entities in bulk. type ProvisionedNetworkCreateBulk struct { config + err error builders []*ProvisionedNetworkCreate } // Save creates the ProvisionedNetwork entities in the database. func (pncb *ProvisionedNetworkCreateBulk) Save(ctx context.Context) ([]*ProvisionedNetwork, error) { + if pncb.err != nil { + return nil, pncb.err + } specs := make([]*sqlgraph.CreateSpec, len(pncb.builders)) nodes := make([]*ProvisionedNetwork, len(pncb.builders)) mutators := make([]Mutator, len(pncb.builders)) @@ -461,8 +398,8 @@ func (pncb *ProvisionedNetworkCreateBulk) Save(ctx context.Context) ([]*Provisio return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, pncb.builders[i+1].mutation) } else { @@ -470,7 +407,7 @@ func (pncb *ProvisionedNetworkCreateBulk) Save(ctx context.Context) ([]*Provisio // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, pncb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/provisionednetwork_delete.go b/ent/provisionednetwork_delete.go index 1cf68fc9..cbdd65a6 100755 --- a/ent/provisionednetwork_delete.go +++ b/ent/provisionednetwork_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (pnd *ProvisionedNetworkDelete) Where(ps ...predicate.ProvisionedNetwork) * // Exec executes the deletion query and returns how many vertices were deleted. 
func (pnd *ProvisionedNetworkDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(pnd.hooks) == 0 { - affected, err = pnd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - pnd.mutation = mutation - affected, err = pnd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(pnd.hooks) - 1; i >= 0; i-- { - if pnd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pnd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pnd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, pnd.sqlExec, pnd.mutation, pnd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (pnd *ProvisionedNetworkDelete) ExecX(ctx context.Context) int { } func (pnd *ProvisionedNetworkDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionednetwork.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(provisionednetwork.Table, sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID)) if ps := pnd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (pnd *ProvisionedNetworkDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, pnd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, pnd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pnd.mutation.done = true + return affected, err } // ProvisionedNetworkDeleteOne is the builder for deleting a single ProvisionedNetwork entity. @@ -92,6 +61,12 @@ type ProvisionedNetworkDeleteOne struct { pnd *ProvisionedNetworkDelete } +// Where appends a list predicates to the ProvisionedNetworkDelete builder. +func (pndo *ProvisionedNetworkDeleteOne) Where(ps ...predicate.ProvisionedNetwork) *ProvisionedNetworkDeleteOne { + pndo.pnd.mutation.Where(ps...) + return pndo +} + // Exec executes the deletion query. func (pndo *ProvisionedNetworkDeleteOne) Exec(ctx context.Context) error { n, err := pndo.pnd.Exec(ctx) @@ -107,5 +82,7 @@ func (pndo *ProvisionedNetworkDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (pndo *ProvisionedNetworkDeleteOne) ExecX(ctx context.Context) { - pndo.pnd.ExecX(ctx) + if err := pndo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/provisionednetwork_query.go b/ent/provisionednetwork_query.go index c04a9b4e..e50496f5 100755 --- a/ent/provisionednetwork_query.go +++ b/ent/provisionednetwork_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -26,20 +25,20 @@ import ( // ProvisionedNetworkQuery is the builder for querying ProvisionedNetwork entities. type ProvisionedNetworkQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.ProvisionedNetwork - // eager-loading edges. 
- withProvisionedNetworkToStatus *StatusQuery - withProvisionedNetworkToNetwork *NetworkQuery - withProvisionedNetworkToBuild *BuildQuery - withProvisionedNetworkToTeam *TeamQuery - withProvisionedNetworkToProvisionedHost *ProvisionedHostQuery - withProvisionedNetworkToPlan *PlanQuery - withFKs bool + ctx *QueryContext + order []provisionednetwork.OrderOption + inters []Interceptor + predicates []predicate.ProvisionedNetwork + withProvisionedNetworkToStatus *StatusQuery + withProvisionedNetworkToNetwork *NetworkQuery + withProvisionedNetworkToBuild *BuildQuery + withProvisionedNetworkToTeam *TeamQuery + withProvisionedNetworkToProvisionedHost *ProvisionedHostQuery + withProvisionedNetworkToPlan *PlanQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*ProvisionedNetwork) error + withNamedProvisionedNetworkToProvisionedHost map[string]*ProvisionedHostQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -51,34 +50,34 @@ func (pnq *ProvisionedNetworkQuery) Where(ps ...predicate.ProvisionedNetwork) *P return pnq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (pnq *ProvisionedNetworkQuery) Limit(limit int) *ProvisionedNetworkQuery { - pnq.limit = &limit + pnq.ctx.Limit = &limit return pnq } -// Offset adds an offset step to the query. +// Offset to start from. func (pnq *ProvisionedNetworkQuery) Offset(offset int) *ProvisionedNetworkQuery { - pnq.offset = &offset + pnq.ctx.Offset = &offset return pnq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (pnq *ProvisionedNetworkQuery) Unique(unique bool) *ProvisionedNetworkQuery { - pnq.unique = &unique + pnq.ctx.Unique = &unique return pnq } -// Order adds an order step to the query. -func (pnq *ProvisionedNetworkQuery) Order(o ...OrderFunc) *ProvisionedNetworkQuery { +// Order specifies how the records should be ordered. +func (pnq *ProvisionedNetworkQuery) Order(o ...provisionednetwork.OrderOption) *ProvisionedNetworkQuery { pnq.order = append(pnq.order, o...) return pnq } // QueryProvisionedNetworkToStatus chains the current query on the "ProvisionedNetworkToStatus" edge. func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToStatus() *StatusQuery { - query := &StatusQuery{config: pnq.config} + query := (&StatusClient{config: pnq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pnq.prepareQuery(ctx); err != nil { return nil, err @@ -100,7 +99,7 @@ func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToStatus() *StatusQue // QueryProvisionedNetworkToNetwork chains the current query on the "ProvisionedNetworkToNetwork" edge. func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToNetwork() *NetworkQuery { - query := &NetworkQuery{config: pnq.config} + query := (&NetworkClient{config: pnq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pnq.prepareQuery(ctx); err != nil { return nil, err @@ -122,7 +121,7 @@ func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToNetwork() *NetworkQ // QueryProvisionedNetworkToBuild chains the current query on the "ProvisionedNetworkToBuild" edge. 
func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToBuild() *BuildQuery { - query := &BuildQuery{config: pnq.config} + query := (&BuildClient{config: pnq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pnq.prepareQuery(ctx); err != nil { return nil, err @@ -144,7 +143,7 @@ func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToBuild() *BuildQuery // QueryProvisionedNetworkToTeam chains the current query on the "ProvisionedNetworkToTeam" edge. func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToTeam() *TeamQuery { - query := &TeamQuery{config: pnq.config} + query := (&TeamClient{config: pnq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pnq.prepareQuery(ctx); err != nil { return nil, err @@ -166,7 +165,7 @@ func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToTeam() *TeamQuery { // QueryProvisionedNetworkToProvisionedHost chains the current query on the "ProvisionedNetworkToProvisionedHost" edge. func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: pnq.config} + query := (&ProvisionedHostClient{config: pnq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pnq.prepareQuery(ctx); err != nil { return nil, err @@ -188,7 +187,7 @@ func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToProvisionedHost() * // QueryProvisionedNetworkToPlan chains the current query on the "ProvisionedNetworkToPlan" edge. func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToPlan() *PlanQuery { - query := &PlanQuery{config: pnq.config} + query := (&PlanClient{config: pnq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := pnq.prepareQuery(ctx); err != nil { return nil, err @@ -211,7 +210,7 @@ func (pnq *ProvisionedNetworkQuery) QueryProvisionedNetworkToPlan() *PlanQuery { // First returns the first ProvisionedNetwork entity from the query. // Returns a *NotFoundError when no ProvisionedNetwork was found. func (pnq *ProvisionedNetworkQuery) First(ctx context.Context) (*ProvisionedNetwork, error) { - nodes, err := pnq.Limit(1).All(ctx) + nodes, err := pnq.Limit(1).All(setContextOp(ctx, pnq.ctx, "First")) if err != nil { return nil, err } @@ -234,7 +233,7 @@ func (pnq *ProvisionedNetworkQuery) FirstX(ctx context.Context) *ProvisionedNetw // Returns a *NotFoundError when no ProvisionedNetwork ID was found. func (pnq *ProvisionedNetworkQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pnq.Limit(1).IDs(ctx); err != nil { + if ids, err = pnq.Limit(1).IDs(setContextOp(ctx, pnq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -257,7 +256,7 @@ func (pnq *ProvisionedNetworkQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one ProvisionedNetwork entity is found. // Returns a *NotFoundError when no ProvisionedNetwork entities are found. func (pnq *ProvisionedNetworkQuery) Only(ctx context.Context) (*ProvisionedNetwork, error) { - nodes, err := pnq.Limit(2).All(ctx) + nodes, err := pnq.Limit(2).All(setContextOp(ctx, pnq.ctx, "Only")) if err != nil { return nil, err } @@ -285,7 +284,7 @@ func (pnq *ProvisionedNetworkQuery) OnlyX(ctx context.Context) *ProvisionedNetwo // Returns a *NotFoundError when no entities are found. 
func (pnq *ProvisionedNetworkQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = pnq.Limit(2).IDs(ctx); err != nil { + if ids, err = pnq.Limit(2).IDs(setContextOp(ctx, pnq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -310,10 +309,12 @@ func (pnq *ProvisionedNetworkQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of ProvisionedNetworks. func (pnq *ProvisionedNetworkQuery) All(ctx context.Context) ([]*ProvisionedNetwork, error) { + ctx = setContextOp(ctx, pnq.ctx, "All") if err := pnq.prepareQuery(ctx); err != nil { return nil, err } - return pnq.sqlAll(ctx) + qr := querierAll[[]*ProvisionedNetwork, *ProvisionedNetworkQuery]() + return withInterceptors[[]*ProvisionedNetwork](ctx, pnq, qr, pnq.inters) } // AllX is like All, but panics if an error occurs. @@ -326,9 +327,12 @@ func (pnq *ProvisionedNetworkQuery) AllX(ctx context.Context) []*ProvisionedNetw } // IDs executes the query and returns a list of ProvisionedNetwork IDs. -func (pnq *ProvisionedNetworkQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := pnq.Select(provisionednetwork.FieldID).Scan(ctx, &ids); err != nil { +func (pnq *ProvisionedNetworkQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if pnq.ctx.Unique == nil && pnq.path != nil { + pnq.Unique(true) + } + ctx = setContextOp(ctx, pnq.ctx, "IDs") + if err = pnq.Select(provisionednetwork.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -345,10 +349,11 @@ func (pnq *ProvisionedNetworkQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (pnq *ProvisionedNetworkQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pnq.ctx, "Count") if err := pnq.prepareQuery(ctx); err != nil { return 0, err } - return pnq.sqlCount(ctx) + return withInterceptors[int](ctx, pnq, querierCount[*ProvisionedNetworkQuery](), pnq.inters) } // CountX is like Count, but panics if an error occurs. @@ -362,10 +367,15 @@ func (pnq *ProvisionedNetworkQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (pnq *ProvisionedNetworkQuery) Exist(ctx context.Context) (bool, error) { - if err := pnq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, pnq.ctx, "Exist") + switch _, err := pnq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return pnq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
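In the provisionednetwork_query.go hunks above, the builder's limit/offset/fields move into a shared QueryContext, Exist is reimplemented on top of FirstID, and the monolithic sqlAll eager-loading is split into per-edge loadProvisionedNetworkTo* helpers. The caller-facing eager-loading API looks unchanged; a short sketch, again assuming an illustrative client, ctx, and helper name that are not part of this patch:

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// loadNetworksWithEdges is a hypothetical helper illustrating eager loading
// against the regenerated query builder.
func loadNetworksWithEdges(ctx context.Context, client *ent.Client) (int, error) {
	nets, err := client.ProvisionedNetwork.Query().
		WithProvisionedNetworkToTeam().
		WithProvisionedNetworkToProvisionedHost(func(q *ent.ProvisionedHostQuery) {
			// Per-edge options are still plain functional opts on the edge query.
			q.Limit(10)
		}).
		All(ctx)
	if err != nil {
		return 0, err
	}
	hosts := 0
	for _, n := range nets {
		// Edges are populated by the new per-edge loadProvisionedNetworkTo* helpers.
		hosts += len(n.Edges.ProvisionedNetworkToProvisionedHost)
	}
	return hosts, nil
}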
@@ -385,9 +395,9 @@ func (pnq *ProvisionedNetworkQuery) Clone() *ProvisionedNetworkQuery { } return &ProvisionedNetworkQuery{ config: pnq.config, - limit: pnq.limit, - offset: pnq.offset, - order: append([]OrderFunc{}, pnq.order...), + ctx: pnq.ctx.Clone(), + order: append([]provisionednetwork.OrderOption{}, pnq.order...), + inters: append([]Interceptor{}, pnq.inters...), predicates: append([]predicate.ProvisionedNetwork{}, pnq.predicates...), withProvisionedNetworkToStatus: pnq.withProvisionedNetworkToStatus.Clone(), withProvisionedNetworkToNetwork: pnq.withProvisionedNetworkToNetwork.Clone(), @@ -396,16 +406,15 @@ func (pnq *ProvisionedNetworkQuery) Clone() *ProvisionedNetworkQuery { withProvisionedNetworkToProvisionedHost: pnq.withProvisionedNetworkToProvisionedHost.Clone(), withProvisionedNetworkToPlan: pnq.withProvisionedNetworkToPlan.Clone(), // clone intermediate query. - sql: pnq.sql.Clone(), - path: pnq.path, - unique: pnq.unique, + sql: pnq.sql.Clone(), + path: pnq.path, } } // WithProvisionedNetworkToStatus tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedNetworkToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToStatus(opts ...func(*StatusQuery)) *ProvisionedNetworkQuery { - query := &StatusQuery{config: pnq.config} + query := (&StatusClient{config: pnq.config}).Query() for _, opt := range opts { opt(query) } @@ -416,7 +425,7 @@ func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToStatus(opts ...func( // WithProvisionedNetworkToNetwork tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedNetworkToNetwork" edge. The optional arguments are used to configure the query builder of the edge. func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToNetwork(opts ...func(*NetworkQuery)) *ProvisionedNetworkQuery { - query := &NetworkQuery{config: pnq.config} + query := (&NetworkClient{config: pnq.config}).Query() for _, opt := range opts { opt(query) } @@ -427,7 +436,7 @@ func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToNetwork(opts ...func // WithProvisionedNetworkToBuild tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedNetworkToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToBuild(opts ...func(*BuildQuery)) *ProvisionedNetworkQuery { - query := &BuildQuery{config: pnq.config} + query := (&BuildClient{config: pnq.config}).Query() for _, opt := range opts { opt(query) } @@ -438,7 +447,7 @@ func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToBuild(opts ...func(* // WithProvisionedNetworkToTeam tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedNetworkToTeam" edge. The optional arguments are used to configure the query builder of the edge. func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToTeam(opts ...func(*TeamQuery)) *ProvisionedNetworkQuery { - query := &TeamQuery{config: pnq.config} + query := (&TeamClient{config: pnq.config}).Query() for _, opt := range opts { opt(query) } @@ -449,7 +458,7 @@ func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToTeam(opts ...func(*T // WithProvisionedNetworkToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedNetworkToProvisionedHost" edge. 
The optional arguments are used to configure the query builder of the edge. func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *ProvisionedNetworkQuery { - query := &ProvisionedHostQuery{config: pnq.config} + query := (&ProvisionedHostClient{config: pnq.config}).Query() for _, opt := range opts { opt(query) } @@ -460,7 +469,7 @@ func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToProvisionedHost(opts // WithProvisionedNetworkToPlan tells the query-builder to eager-load the nodes that are connected to // the "ProvisionedNetworkToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToPlan(opts ...func(*PlanQuery)) *ProvisionedNetworkQuery { - query := &PlanQuery{config: pnq.config} + query := (&PlanClient{config: pnq.config}).Query() for _, opt := range opts { opt(query) } @@ -482,17 +491,13 @@ func (pnq *ProvisionedNetworkQuery) WithProvisionedNetworkToPlan(opts ...func(*P // GroupBy(provisionednetwork.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (pnq *ProvisionedNetworkQuery) GroupBy(field string, fields ...string) *ProvisionedNetworkGroupBy { - group := &ProvisionedNetworkGroupBy{config: pnq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := pnq.prepareQuery(ctx); err != nil { - return nil, err - } - return pnq.sqlQuery(ctx), nil - } - return group + pnq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProvisionedNetworkGroupBy{build: pnq} + grbuild.flds = &pnq.ctx.Fields + grbuild.label = provisionednetwork.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -507,14 +512,31 @@ func (pnq *ProvisionedNetworkQuery) GroupBy(field string, fields ...string) *Pro // client.ProvisionedNetwork.Query(). // Select(provisionednetwork.FieldName). // Scan(ctx, &v) -// func (pnq *ProvisionedNetworkQuery) Select(fields ...string) *ProvisionedNetworkSelect { - pnq.fields = append(pnq.fields, fields...) - return &ProvisionedNetworkSelect{ProvisionedNetworkQuery: pnq} + pnq.ctx.Fields = append(pnq.ctx.Fields, fields...) + sbuild := &ProvisionedNetworkSelect{ProvisionedNetworkQuery: pnq} + sbuild.label = provisionednetwork.Label + sbuild.flds, sbuild.scan = &pnq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ProvisionedNetworkSelect configured with the given aggregations. +func (pnq *ProvisionedNetworkQuery) Aggregate(fns ...AggregateFunc) *ProvisionedNetworkSelect { + return pnq.Select().Aggregate(fns...) 
} func (pnq *ProvisionedNetworkQuery) prepareQuery(ctx context.Context) error { - for _, f := range pnq.fields { + for _, inter := range pnq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pnq); err != nil { + return err + } + } + } + for _, f := range pnq.ctx.Fields { if !provisionednetwork.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -529,7 +551,7 @@ func (pnq *ProvisionedNetworkQuery) prepareQuery(ctx context.Context) error { return nil } -func (pnq *ProvisionedNetworkQuery) sqlAll(ctx context.Context) ([]*ProvisionedNetwork, error) { +func (pnq *ProvisionedNetworkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ProvisionedNetwork, error) { var ( nodes = []*ProvisionedNetwork{} withFKs = pnq.withFKs @@ -549,236 +571,292 @@ func (pnq *ProvisionedNetworkQuery) sqlAll(ctx context.Context) ([]*ProvisionedN if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, provisionednetwork.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ProvisionedNetwork).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &ProvisionedNetwork{config: pnq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(pnq.modifiers) > 0 { + _spec.Modifiers = pnq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, pnq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := pnq.withProvisionedNetworkToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisionedNetwork) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(provisionednetwork.ProvisionedNetworkToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := pnq.loadProvisionedNetworkToStatus(ctx, query, nodes, nil, + func(n *ProvisionedNetwork, e *Status) { n.Edges.ProvisionedNetworkToStatus = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.provisioned_network_provisioned_network_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioned_network_provisioned_network_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisionedNetworkToStatus = n - } } - if query := pnq.withProvisionedNetworkToNetwork; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) - for i := range nodes { - if nodes[i].provisioned_network_provisioned_network_to_network == nil { - continue - } - fk := *nodes[i].provisioned_network_provisioned_network_to_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - 
} - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := pnq.loadProvisionedNetworkToNetwork(ctx, query, nodes, nil, + func(n *ProvisionedNetwork, e *Network) { n.Edges.ProvisionedNetworkToNetwork = e }); err != nil { + return nil, err } - query.Where(network.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := pnq.withProvisionedNetworkToBuild; query != nil { + if err := pnq.loadProvisionedNetworkToBuild(ctx, query, nodes, nil, + func(n *ProvisionedNetwork, e *Build) { n.Edges.ProvisionedNetworkToBuild = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedNetworkToNetwork = n - } + } + if query := pnq.withProvisionedNetworkToTeam; query != nil { + if err := pnq.loadProvisionedNetworkToTeam(ctx, query, nodes, nil, + func(n *ProvisionedNetwork, e *Team) { n.Edges.ProvisionedNetworkToTeam = e }); err != nil { + return nil, err } } - - if query := pnq.withProvisionedNetworkToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) - for i := range nodes { - if nodes[i].provisioned_network_provisioned_network_to_build == nil { - continue - } - fk := *nodes[i].provisioned_network_provisioned_network_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := pnq.withProvisionedNetworkToProvisionedHost; query != nil { + if err := pnq.loadProvisionedNetworkToProvisionedHost(ctx, query, nodes, + func(n *ProvisionedNetwork) { n.Edges.ProvisionedNetworkToProvisionedHost = []*ProvisionedHost{} }, + func(n *ProvisionedNetwork, e *ProvisionedHost) { + n.Edges.ProvisionedNetworkToProvisionedHost = append(n.Edges.ProvisionedNetworkToProvisionedHost, e) + }); err != nil { + return nil, err } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := pnq.withProvisionedNetworkToPlan; query != nil { + if err := pnq.loadProvisionedNetworkToPlan(ctx, query, nodes, nil, + func(n *ProvisionedNetwork, e *Plan) { n.Edges.ProvisionedNetworkToPlan = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedNetworkToBuild = n - } + } + for name, query := range pnq.withNamedProvisionedNetworkToProvisionedHost { + if err := pnq.loadProvisionedNetworkToProvisionedHost(ctx, query, nodes, + func(n *ProvisionedNetwork) { n.appendNamedProvisionedNetworkToProvisionedHost(name) }, + func(n *ProvisionedNetwork, e *ProvisionedHost) { + n.appendNamedProvisionedNetworkToProvisionedHost(name, e) + }); err != nil { + return nil, err } } + for i := range pnq.loadTotal { + if err := pnq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := pnq.withProvisionedNetworkToTeam; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) +func (pnq *ProvisionedNetworkQuery) loadProvisionedNetworkToStatus(ctx context.Context, query *StatusQuery, nodes []*ProvisionedNetwork, init func(*ProvisionedNetwork), assign func(*ProvisionedNetwork, *Status)) error { + fks := 
make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisionedNetwork) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisionednetwork.ProvisionedNetworkToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.provisioned_network_provisioned_network_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "provisioned_network_provisioned_network_to_status" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioned_network_provisioned_network_to_status" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (pnq *ProvisionedNetworkQuery) loadProvisionedNetworkToNetwork(ctx context.Context, query *NetworkQuery, nodes []*ProvisionedNetwork, init func(*ProvisionedNetwork), assign func(*ProvisionedNetwork, *Network)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) + for i := range nodes { + if nodes[i].provisioned_network_provisioned_network_to_network == nil { + continue + } + fk := *nodes[i].provisioned_network_provisioned_network_to_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(network.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_network" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].provisioned_network_provisioned_network_to_team == nil { - continue - } - fk := *nodes[i].provisioned_network_provisioned_network_to_team - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(team.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (pnq *ProvisionedNetworkQuery) loadProvisionedNetworkToBuild(ctx context.Context, query *BuildQuery, nodes []*ProvisionedNetwork, init func(*ProvisionedNetwork), assign func(*ProvisionedNetwork, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) + for i := range nodes { + if nodes[i].provisioned_network_provisioned_network_to_build == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_team" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedNetworkToTeam = n - } + fk := *nodes[i].provisioned_network_provisioned_network_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := pnq.withProvisionedNetworkToProvisionedHost; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisionedNetwork) + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected 
foreign-key "provisioned_network_provisioned_network_to_build" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ProvisionedNetworkToProvisionedHost = []*ProvisionedHost{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.ProvisionedHost(func(s *sql.Selector) { - s.Where(sql.InValues(provisionednetwork.ProvisionedNetworkToProvisionedHostColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (pnq *ProvisionedNetworkQuery) loadProvisionedNetworkToTeam(ctx context.Context, query *TeamQuery, nodes []*ProvisionedNetwork, init func(*ProvisionedNetwork), assign func(*ProvisionedNetwork, *Team)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) + for i := range nodes { + if nodes[i].provisioned_network_provisioned_network_to_team == nil { + continue } - for _, n := range neighbors { - fk := n.provisioned_host_provisioned_host_to_provisioned_network - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioned_host_provisioned_host_to_provisioned_network" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_provisioned_network" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisionedNetworkToProvisionedHost = append(node.Edges.ProvisionedNetworkToProvisionedHost, n) + fk := *nodes[i].provisioned_network_provisioned_network_to_team + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := pnq.withProvisionedNetworkToPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) + if len(ids) == 0 { + return nil + } + query.Where(team.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_team" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].plan_plan_to_provisioned_network == nil { - continue - } - fk := *nodes[i].plan_plan_to_provisioned_network - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (pnq *ProvisionedNetworkQuery) loadProvisionedNetworkToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*ProvisionedNetwork, init func(*ProvisionedNetwork), assign func(*ProvisionedNetwork, *ProvisionedHost)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisionedNetwork) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioned_network" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisionedNetworkToPlan = n - } + } + query.withFKs = true + query.Where(predicate.ProvisionedHost(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisionednetwork.ProvisionedNetworkToProvisionedHostColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err 
!= nil { + return err + } + for _, n := range neighbors { + fk := n.provisioned_host_provisioned_host_to_provisioned_network + if fk == nil { + return fmt.Errorf(`foreign-key "provisioned_host_provisioned_host_to_provisioned_network" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioned_host_provisioned_host_to_provisioned_network" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil +} +func (pnq *ProvisionedNetworkQuery) loadProvisionedNetworkToPlan(ctx context.Context, query *PlanQuery, nodes []*ProvisionedNetwork, init func(*ProvisionedNetwork), assign func(*ProvisionedNetwork, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisionedNetwork) + for i := range nodes { + if nodes[i].plan_plan_to_provisioned_network == nil { + continue + } + fk := *nodes[i].plan_plan_to_provisioned_network + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioned_network" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (pnq *ProvisionedNetworkQuery) sqlCount(ctx context.Context) (int, error) { _spec := pnq.querySpec() - _spec.Node.Columns = pnq.fields - if len(pnq.fields) > 0 { - _spec.Unique = pnq.unique != nil && *pnq.unique + if len(pnq.modifiers) > 0 { + _spec.Modifiers = pnq.modifiers } - return sqlgraph.CountNodes(ctx, pnq.driver, _spec) -} - -func (pnq *ProvisionedNetworkQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := pnq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = pnq.ctx.Fields + if len(pnq.ctx.Fields) > 0 { + _spec.Unique = pnq.ctx.Unique != nil && *pnq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, pnq.driver, _spec) } func (pnq *ProvisionedNetworkQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionednetwork.Table, - Columns: provisionednetwork.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, - }, - From: pnq.sql, - Unique: true, - } - if unique := pnq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(provisionednetwork.Table, provisionednetwork.Columns, sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID)) + _spec.From = pnq.sql + if unique := pnq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if pnq.path != nil { + _spec.Unique = true } - if fields := pnq.fields; len(fields) > 0 { + if fields := pnq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, provisionednetwork.FieldID) for i := range fields { @@ -794,10 +872,10 @@ func (pnq *ProvisionedNetworkQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := pnq.limit; limit != nil { + if limit := pnq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := pnq.offset; offset != nil { + if offset := pnq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := pnq.order; len(ps) > 0 { @@ -813,7 +891,7 @@ func (pnq *ProvisionedNetworkQuery) 
querySpec() *sqlgraph.QuerySpec { func (pnq *ProvisionedNetworkQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(pnq.driver.Dialect()) t1 := builder.Table(provisionednetwork.Table) - columns := pnq.fields + columns := pnq.ctx.Fields if len(columns) == 0 { columns = provisionednetwork.Columns } @@ -822,7 +900,7 @@ func (pnq *ProvisionedNetworkQuery) sqlQuery(ctx context.Context) *sql.Selector selector = pnq.sql selector.Select(selector.Columns(columns...)...) } - if pnq.unique != nil && *pnq.unique { + if pnq.ctx.Unique != nil && *pnq.ctx.Unique { selector.Distinct() } for _, p := range pnq.predicates { @@ -831,25 +909,35 @@ func (pnq *ProvisionedNetworkQuery) sqlQuery(ctx context.Context) *sql.Selector for _, p := range pnq.order { p(selector) } - if offset := pnq.offset; offset != nil { + if offset := pnq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := pnq.limit; limit != nil { + if limit := pnq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedProvisionedNetworkToProvisionedHost tells the query-builder to eager-load the nodes that are connected to the "ProvisionedNetworkToProvisionedHost" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (pnq *ProvisionedNetworkQuery) WithNamedProvisionedNetworkToProvisionedHost(name string, opts ...func(*ProvisionedHostQuery)) *ProvisionedNetworkQuery { + query := (&ProvisionedHostClient{config: pnq.config}).Query() + for _, opt := range opts { + opt(query) + } + if pnq.withNamedProvisionedNetworkToProvisionedHost == nil { + pnq.withNamedProvisionedNetworkToProvisionedHost = make(map[string]*ProvisionedHostQuery) + } + pnq.withNamedProvisionedNetworkToProvisionedHost[name] = query + return pnq +} + // ProvisionedNetworkGroupBy is the group-by builder for ProvisionedNetwork entities. type ProvisionedNetworkGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *ProvisionedNetworkQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -858,471 +946,77 @@ func (pngb *ProvisionedNetworkGroupBy) Aggregate(fns ...AggregateFunc) *Provisio return pngb } -// Scan applies the group-by query and scans the result into the given value. -func (pngb *ProvisionedNetworkGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := pngb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (pngb *ProvisionedNetworkGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pngb.build.ctx, "GroupBy") + if err := pngb.build.prepareQuery(ctx); err != nil { return err } - pngb.sql = query - return pngb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := pngb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. 
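For context on the named eager-loading builder added above (WithNamedProvisionedNetworkToProvisionedHost), a minimal caller-side sketch of how it might be used. The *ent.Client value, the Limit option, and the NamedProvisionedNetworkToProvisionedHost accessor on the entity are assumed from ent's usual codegen rather than shown in this hunk.

```go
package main

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
)

// listNetworkHosts sketches loading the ProvisionedNetworkToProvisionedHost edge
// twice under different names, each with its own query configuration.
func listNetworkHosts(ctx context.Context, client *ent.Client) error {
	networks, err := client.ProvisionedNetwork.Query().
		WithNamedProvisionedNetworkToProvisionedHost("all").
		WithNamedProvisionedNetworkToProvisionedHost("firstTen", func(q *ent.ProvisionedHostQuery) {
			q.Limit(10) // each named load carries its own options
		}).
		All(ctx)
	if err != nil {
		return err
	}
	for _, pn := range networks {
		// ent also generates a NamedProvisionedNetworkToProvisionedHost(name) accessor
		// on the entity (assumed here; it lives outside this hunk).
		hosts, _ := pn.NamedProvisionedNetworkToProvisionedHost("firstTen")
		fmt.Println(pn.Name, len(hosts))
	}
	return nil
}
```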
-func (pngb *ProvisionedNetworkGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(pngb.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := pngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) StringsX(ctx context.Context) []string { - v, err := pngb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pngb *ProvisionedNetworkGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = pngb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) StringX(ctx context.Context) string { - v, err := pngb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (pngb *ProvisionedNetworkGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(pngb.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := pngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) IntsX(ctx context.Context) []int { - v, err := pngb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pngb *ProvisionedNetworkGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = pngb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) IntX(ctx context.Context) int { - v, err := pngb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (pngb *ProvisionedNetworkGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(pngb.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := pngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := pngb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (pngb *ProvisionedNetworkGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = pngb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) Float64X(ctx context.Context) float64 { - v, err := pngb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (pngb *ProvisionedNetworkGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(pngb.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := pngb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*ProvisionedNetworkQuery, *ProvisionedNetworkGroupBy](ctx, pngb.build, pngb, pngb.build.inters, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) BoolsX(ctx context.Context) []bool { - v, err := pngb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (pngb *ProvisionedNetworkGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = pngb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (pngb *ProvisionedNetworkGroupBy) BoolX(ctx context.Context) bool { - v, err := pngb.Bool(ctx) - if err != nil { - panic(err) +func (pngb *ProvisionedNetworkGroupBy) sqlScan(ctx context.Context, root *ProvisionedNetworkQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pngb.fns)) + for _, fn := range pngb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (pngb *ProvisionedNetworkGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range pngb.fields { - if !provisionednetwork.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pngb.flds)+len(pngb.fns)) + for _, f := range *pngb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := pngb.sqlQuery() + selector.GroupBy(selector.Columns(*pngb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := pngb.driver.Query(ctx, query, args, rows); err != nil { + if err := pngb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (pngb *ProvisionedNetworkGroupBy) sqlQuery() *sql.Selector { - selector := pngb.sql.Select() - aggregation := make([]string, 0, len(pngb.fns)) - for _, fn := range pngb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(pngb.fields)+len(pngb.fns)) - for _, f := range pngb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(pngb.fields...)...) -} - // ProvisionedNetworkSelect is the builder for selecting fields of ProvisionedNetwork entities. type ProvisionedNetworkSelect struct { *ProvisionedNetworkQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (pns *ProvisionedNetworkSelect) Aggregate(fns ...AggregateFunc) *ProvisionedNetworkSelect { + pns.fns = append(pns.fns, fns...) + return pns } // Scan applies the selector query and scans the result into the given value. -func (pns *ProvisionedNetworkSelect) Scan(ctx context.Context, v interface{}) error { +func (pns *ProvisionedNetworkSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pns.ctx, "Select") if err := pns.prepareQuery(ctx); err != nil { return err } - pns.sql = pns.ProvisionedNetworkQuery.sqlQuery(ctx) - return pns.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) ScanX(ctx context.Context, v interface{}) { - if err := pns.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Strings(ctx context.Context) ([]string, error) { - if len(pns.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := pns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) StringsX(ctx context.Context) []string { - v, err := pns.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = pns.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. 
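The rewritten prepareQuery and the new scanWithInterceptors path route every query through registered interceptors before touching the database. A minimal sketch of registering one; the client-level Intercept method and the ent.InterceptFunc / ent.QuerierFunc adapters are standard codegen for this ent version and are assumed here rather than shown in the hunk.

```go
package main

import (
	"context"
	"log"

	"github.com/gen0cide/laforge/ent"
)

// registerQueryLogging sketches the interceptor machinery the regenerated
// builders now consult (inters, Traverser, scanWithInterceptors).
func registerQueryLogging(client *ent.Client) {
	client.Intercept(
		ent.InterceptFunc(func(next ent.Querier) ent.Querier {
			return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
				log.Printf("ent query: %T", q) // runs before every query on this client
				return next.Query(ctx, q)
			})
		}),
	)
}
```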
-func (pns *ProvisionedNetworkSelect) StringX(ctx context.Context) string { - v, err := pns.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Ints(ctx context.Context) ([]int, error) { - if len(pns.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := pns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) IntsX(ctx context.Context) []int { - v, err := pns.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = pns.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) IntX(ctx context.Context) int { - v, err := pns.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(pns.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := pns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*ProvisionedNetworkQuery, *ProvisionedNetworkSelect](ctx, pns.ProvisionedNetworkQuery, pns, pns.inters, v) } -// Float64sX is like Float64s, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) Float64sX(ctx context.Context) []float64 { - v, err := pns.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = pns.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) Float64X(ctx context.Context) float64 { - v, err := pns.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Bools(ctx context.Context) ([]bool, error) { - if len(pns.fields) > 1 { - return nil, errors.New("ent: ProvisionedNetworkSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := pns.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. 
-func (pns *ProvisionedNetworkSelect) BoolsX(ctx context.Context) []bool { - v, err := pns.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (pns *ProvisionedNetworkSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = pns.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisionednetwork.Label} - default: - err = fmt.Errorf("ent: ProvisionedNetworkSelect.Bools returned %d results when one was expected", len(v)) +func (pns *ProvisionedNetworkSelect) sqlScan(ctx context.Context, root *ProvisionedNetworkQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pns.fns)) + for _, fn := range pns.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (pns *ProvisionedNetworkSelect) BoolX(ctx context.Context) bool { - v, err := pns.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*pns.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (pns *ProvisionedNetworkSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := pns.sql.Query() + query, args := selector.Query() if err := pns.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/provisionednetwork_update.go b/ent/provisionednetwork_update.go index 0b62cba7..b99a896b 100755 --- a/ent/provisionednetwork_update.go +++ b/ent/provisionednetwork_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -40,12 +40,28 @@ func (pnu *ProvisionedNetworkUpdate) SetName(s string) *ProvisionedNetworkUpdate return pnu } +// SetNillableName sets the "name" field if the given value is not nil. +func (pnu *ProvisionedNetworkUpdate) SetNillableName(s *string) *ProvisionedNetworkUpdate { + if s != nil { + pnu.SetName(*s) + } + return pnu +} + // SetCidr sets the "cidr" field. func (pnu *ProvisionedNetworkUpdate) SetCidr(s string) *ProvisionedNetworkUpdate { pnu.mutation.SetCidr(s) return pnu } +// SetNillableCidr sets the "cidr" field if the given value is not nil. +func (pnu *ProvisionedNetworkUpdate) SetNillableCidr(s *string) *ProvisionedNetworkUpdate { + if s != nil { + pnu.SetCidr(*s) + } + return pnu +} + // SetVars sets the "vars" field. func (pnu *ProvisionedNetworkUpdate) SetVars(m map[string]string) *ProvisionedNetworkUpdate { pnu.mutation.SetVars(m) @@ -220,34 +236,7 @@ func (pnu *ProvisionedNetworkUpdate) ClearProvisionedNetworkToPlan() *Provisione // Save executes the query and returns the number of nodes affected by the update operation. 
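The SetNillableName / SetNillableCidr setters added to the update builders make partial updates driven by optional input straightforward. A sketch under the assumption of an UpdateOneID entry point on the generated client and a hypothetical request struct with pointer fields:

```go
package main

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// updateNetworkInput is a hypothetical request payload; nil pointers mean
// "leave the field unchanged".
type updateNetworkInput struct {
	Name *string
	Cidr *string
}

// applyNetworkUpdate forwards optional fields without nil-checking each one.
func applyNetworkUpdate(ctx context.Context, client *ent.Client, id uuid.UUID, in updateNetworkInput) (*ent.ProvisionedNetwork, error) {
	return client.ProvisionedNetwork.UpdateOneID(id).
		SetNillableName(in.Name). // no-op when in.Name is nil
		SetNillableCidr(in.Cidr). // no-op when in.Cidr is nil
		Save(ctx)
}
```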
func (pnu *ProvisionedNetworkUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(pnu.hooks) == 0 { - affected, err = pnu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - pnu.mutation = mutation - affected, err = pnu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(pnu.hooks) - 1; i >= 0; i-- { - if pnu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pnu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pnu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, pnu.sqlSave, pnu.mutation, pnu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -273,16 +262,7 @@ func (pnu *ProvisionedNetworkUpdate) ExecX(ctx context.Context) { } func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionednetwork.Table, - Columns: provisionednetwork.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(provisionednetwork.Table, provisionednetwork.Columns, sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID)) if ps := pnu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -291,25 +271,13 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er } } if value, ok := pnu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionednetwork.FieldName, - }) + _spec.SetField(provisionednetwork.FieldName, field.TypeString, value) } if value, ok := pnu.mutation.Cidr(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionednetwork.FieldCidr, - }) + _spec.SetField(provisionednetwork.FieldCidr, field.TypeString, value) } if value, ok := pnu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: provisionednetwork.FieldVars, - }) + _spec.SetField(provisionednetwork.FieldVars, field.TypeJSON, value) } if pnu.mutation.ProvisionedNetworkToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -319,10 +287,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -335,10 +300,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -354,10 +316,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: 
[]string{provisionednetwork.ProvisionedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -370,10 +329,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -389,10 +345,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -405,10 +358,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -424,10 +374,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -440,10 +387,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -459,10 +403,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -475,10 +416,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -494,10 +432,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + 
IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -513,10 +448,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -529,10 +461,7 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er Columns: []string{provisionednetwork.ProvisionedNetworkToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -544,10 +473,11 @@ func (pnu *ProvisionedNetworkUpdate) sqlSave(ctx context.Context) (n int, err er if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{provisionednetwork.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + pnu.mutation.done = true return n, nil } @@ -565,12 +495,28 @@ func (pnuo *ProvisionedNetworkUpdateOne) SetName(s string) *ProvisionedNetworkUp return pnuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (pnuo *ProvisionedNetworkUpdateOne) SetNillableName(s *string) *ProvisionedNetworkUpdateOne { + if s != nil { + pnuo.SetName(*s) + } + return pnuo +} + // SetCidr sets the "cidr" field. func (pnuo *ProvisionedNetworkUpdateOne) SetCidr(s string) *ProvisionedNetworkUpdateOne { pnuo.mutation.SetCidr(s) return pnuo } +// SetNillableCidr sets the "cidr" field if the given value is not nil. +func (pnuo *ProvisionedNetworkUpdateOne) SetNillableCidr(s *string) *ProvisionedNetworkUpdateOne { + if s != nil { + pnuo.SetCidr(*s) + } + return pnuo +} + // SetVars sets the "vars" field. func (pnuo *ProvisionedNetworkUpdateOne) SetVars(m map[string]string) *ProvisionedNetworkUpdateOne { pnuo.mutation.SetVars(m) @@ -743,6 +689,12 @@ func (pnuo *ProvisionedNetworkUpdateOne) ClearProvisionedNetworkToPlan() *Provis return pnuo } +// Where appends a list predicates to the ProvisionedNetworkUpdate builder. +func (pnuo *ProvisionedNetworkUpdateOne) Where(ps ...predicate.ProvisionedNetwork) *ProvisionedNetworkUpdateOne { + pnuo.mutation.Where(ps...) + return pnuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (pnuo *ProvisionedNetworkUpdateOne) Select(field string, fields ...string) *ProvisionedNetworkUpdateOne { @@ -752,34 +704,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) Select(field string, fields ...string) // Save executes the query and returns the updated ProvisionedNetwork entity. 
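The new Where method on ProvisionedNetworkUpdateOne lets a single-row update carry extra predicates; combined with the not-found handling in sqlSave, an update whose predicate no longer matches surfaces as a NotFoundError. A sketch assuming the generated provisionednetwork.Cidr equality predicate and an UpdateOneID entry point, neither of which appears in this hunk:

```go
package main

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/provisionednetwork"
	"github.com/google/uuid"
)

// renameIfCidrMatches renames the network only while the row still has the
// expected CIDR; otherwise Save reports not-found (see sqlSave above).
func renameIfCidrMatches(ctx context.Context, client *ent.Client, id uuid.UUID, oldCidr, newName string) (*ent.ProvisionedNetwork, error) {
	return client.ProvisionedNetwork.UpdateOneID(id).
		Where(provisionednetwork.Cidr(oldCidr)). // assumed generated equality predicate
		SetName(newName).
		Save(ctx)
}
```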
func (pnuo *ProvisionedNetworkUpdateOne) Save(ctx context.Context) (*ProvisionedNetwork, error) { - var ( - err error - node *ProvisionedNetwork - ) - if len(pnuo.hooks) == 0 { - node, err = pnuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisionedNetworkMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - pnuo.mutation = mutation - node, err = pnuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(pnuo.hooks) - 1; i >= 0; i-- { - if pnuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = pnuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, pnuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, pnuo.sqlSave, pnuo.mutation, pnuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -805,16 +730,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) ExecX(ctx context.Context) { } func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *ProvisionedNetwork, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisionednetwork.Table, - Columns: provisionednetwork.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(provisionednetwork.Table, provisionednetwork.Columns, sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID)) id, ok := pnuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ProvisionedNetwork.id" for update`)} @@ -840,25 +756,13 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr } } if value, ok := pnuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionednetwork.FieldName, - }) + _spec.SetField(provisionednetwork.FieldName, field.TypeString, value) } if value, ok := pnuo.mutation.Cidr(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: provisionednetwork.FieldCidr, - }) + _spec.SetField(provisionednetwork.FieldCidr, field.TypeString, value) } if value, ok := pnuo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: provisionednetwork.FieldVars, - }) + _spec.SetField(provisionednetwork.FieldVars, field.TypeJSON, value) } if pnuo.mutation.ProvisionedNetworkToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -868,10 +772,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -884,10 +785,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -903,10 +801,7 @@ func (pnuo 
*ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -919,10 +814,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: network.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(network.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -938,10 +830,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -954,10 +843,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -973,10 +859,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -989,10 +872,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1008,10 +888,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1024,10 +901,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1043,10 +917,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1062,10 +933,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1078,10 +946,7 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr Columns: []string{provisionednetwork.ProvisionedNetworkToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1096,9 +961,10 @@ func (pnuo *ProvisionedNetworkUpdateOne) sqlSave(ctx context.Context) (_node *Pr if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{provisionednetwork.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + pnuo.mutation.done = true return _node, nil } diff --git a/ent/provisioningstep.go b/ent/provisioningstep.go index 344d5420..921be27e 100755 --- a/ent/provisioningstep.go +++ b/ent/provisioningstep.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/ansible" "github.com/gen0cide/laforge/ent/command" @@ -35,6 +36,7 @@ type ProvisioningStep struct { // The values are being populated by the ProvisioningStepQuery when eager-loading is set. Edges ProvisioningStepEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // ProvisioningStepToStatus holds the value of the ProvisioningStepToStatus edge. HCLProvisioningStepToStatus *Status `json:"ProvisioningStepToStatus,omitempty"` @@ -60,7 +62,7 @@ type ProvisioningStep struct { HCLProvisioningStepToAgentTask []*AgentTask `json:"ProvisioningStepToAgentTask,omitempty"` // ProvisioningStepToGinFileMiddleware holds the value of the ProvisioningStepToGinFileMiddleware edge. HCLProvisioningStepToGinFileMiddleware *GinFileMiddleware `json:"ProvisioningStepToGinFileMiddleware,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ gin_file_middleware_gin_file_middleware_to_provisioning_step *uuid.UUID plan_plan_to_provisioning_step *uuid.UUID provisioning_step_provisioning_step_to_provisioned_host *uuid.UUID @@ -71,6 +73,7 @@ type ProvisioningStep struct { provisioning_step_provisioning_step_to_file_download *uuid.UUID provisioning_step_provisioning_step_to_file_extract *uuid.UUID provisioning_step_provisioning_step_to_ansible *uuid.UUID + selectValues sql.SelectValues } // ProvisioningStepEdges holds the relations/edges for other nodes in the graph. @@ -102,6 +105,10 @@ type ProvisioningStepEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [12]bool + // totalCount holds the count of the edges above. 
+ totalCount [12]map[string]int + + namedProvisioningStepToAgentTask map[string][]*AgentTask } // ProvisioningStepToStatusOrErr returns the ProvisioningStepToStatus value or an error if the edge @@ -109,8 +116,7 @@ type ProvisioningStepEdges struct { func (e ProvisioningStepEdges) ProvisioningStepToStatusOrErr() (*Status, error) { if e.loadedTypes[0] { if e.ProvisioningStepToStatus == nil { - // The edge ProvisioningStepToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.ProvisioningStepToStatus, nil @@ -123,8 +129,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToStatusOrErr() (*Status, error) func (e ProvisioningStepEdges) ProvisioningStepToProvisionedHostOrErr() (*ProvisionedHost, error) { if e.loadedTypes[1] { if e.ProvisioningStepToProvisionedHost == nil { - // The edge ProvisioningStepToProvisionedHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionedhost.Label} } return e.ProvisioningStepToProvisionedHost, nil @@ -137,8 +142,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToProvisionedHostOrErr() (*Provis func (e ProvisioningStepEdges) ProvisioningStepToScriptOrErr() (*Script, error) { if e.loadedTypes[2] { if e.ProvisioningStepToScript == nil { - // The edge ProvisioningStepToScript was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: script.Label} } return e.ProvisioningStepToScript, nil @@ -151,8 +155,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToScriptOrErr() (*Script, error) func (e ProvisioningStepEdges) ProvisioningStepToCommandOrErr() (*Command, error) { if e.loadedTypes[3] { if e.ProvisioningStepToCommand == nil { - // The edge ProvisioningStepToCommand was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: command.Label} } return e.ProvisioningStepToCommand, nil @@ -165,8 +168,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToCommandOrErr() (*Command, error func (e ProvisioningStepEdges) ProvisioningStepToDNSRecordOrErr() (*DNSRecord, error) { if e.loadedTypes[4] { if e.ProvisioningStepToDNSRecord == nil { - // The edge ProvisioningStepToDNSRecord was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: dnsrecord.Label} } return e.ProvisioningStepToDNSRecord, nil @@ -179,8 +181,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToDNSRecordOrErr() (*DNSRecord, e func (e ProvisioningStepEdges) ProvisioningStepToFileDeleteOrErr() (*FileDelete, error) { if e.loadedTypes[5] { if e.ProvisioningStepToFileDelete == nil { - // The edge ProvisioningStepToFileDelete was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: filedelete.Label} } return e.ProvisioningStepToFileDelete, nil @@ -193,8 +194,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToFileDeleteOrErr() (*FileDelete, func (e ProvisioningStepEdges) ProvisioningStepToFileDownloadOrErr() (*FileDownload, error) { if e.loadedTypes[6] { if e.ProvisioningStepToFileDownload == nil { - // The edge ProvisioningStepToFileDownload was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: filedownload.Label} } return e.ProvisioningStepToFileDownload, nil @@ -207,8 +207,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToFileDownloadOrErr() (*FileDownl func (e ProvisioningStepEdges) ProvisioningStepToFileExtractOrErr() (*FileExtract, error) { if e.loadedTypes[7] { if e.ProvisioningStepToFileExtract == nil { - // The edge ProvisioningStepToFileExtract was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: fileextract.Label} } return e.ProvisioningStepToFileExtract, nil @@ -221,8 +220,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToFileExtractOrErr() (*FileExtrac func (e ProvisioningStepEdges) ProvisioningStepToAnsibleOrErr() (*Ansible, error) { if e.loadedTypes[8] { if e.ProvisioningStepToAnsible == nil { - // The edge ProvisioningStepToAnsible was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: ansible.Label} } return e.ProvisioningStepToAnsible, nil @@ -235,8 +233,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToAnsibleOrErr() (*Ansible, error func (e ProvisioningStepEdges) ProvisioningStepToPlanOrErr() (*Plan, error) { if e.loadedTypes[9] { if e.ProvisioningStepToPlan == nil { - // The edge ProvisioningStepToPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.ProvisioningStepToPlan, nil @@ -258,8 +255,7 @@ func (e ProvisioningStepEdges) ProvisioningStepToAgentTaskOrErr() ([]*AgentTask, func (e ProvisioningStepEdges) ProvisioningStepToGinFileMiddlewareOrErr() (*GinFileMiddleware, error) { if e.loadedTypes[11] { if e.ProvisioningStepToGinFileMiddleware == nil { - // The edge ProvisioningStepToGinFileMiddleware was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: ginfilemiddleware.Label} } return e.ProvisioningStepToGinFileMiddleware, nil @@ -268,8 +264,8 @@ func (e ProvisioningStepEdges) ProvisioningStepToGinFileMiddlewareOrErr() (*GinF } // scanValues returns the types for scanning values from sql.Rows. -func (*ProvisioningStep) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*ProvisioningStep) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case provisioningstep.FieldStepNumber: @@ -299,7 +295,7 @@ func (*ProvisioningStep) scanValues(columns []string) ([]interface{}, error) { case provisioningstep.ForeignKeys[9]: // provisioning_step_provisioning_step_to_ansible values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type ProvisioningStep", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -307,7 +303,7 @@ func (*ProvisioningStep) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the ProvisioningStep fields. 
-func (ps *ProvisioningStep) assignValues(columns []string, values []interface{}) error { +func (ps *ProvisioningStep) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -401,86 +397,94 @@ func (ps *ProvisioningStep) assignValues(columns []string, values []interface{}) ps.provisioning_step_provisioning_step_to_ansible = new(uuid.UUID) *ps.provisioning_step_provisioning_step_to_ansible = *value.S.(*uuid.UUID) } + default: + ps.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the ProvisioningStep. +// This includes values selected through modifiers, order, etc. +func (ps *ProvisioningStep) Value(name string) (ent.Value, error) { + return ps.selectValues.Get(name) +} + // QueryProvisioningStepToStatus queries the "ProvisioningStepToStatus" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToStatus() *StatusQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToStatus(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToStatus(ps) } // QueryProvisioningStepToProvisionedHost queries the "ProvisioningStepToProvisionedHost" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToProvisionedHost() *ProvisionedHostQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToProvisionedHost(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToProvisionedHost(ps) } // QueryProvisioningStepToScript queries the "ProvisioningStepToScript" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToScript() *ScriptQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToScript(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToScript(ps) } // QueryProvisioningStepToCommand queries the "ProvisioningStepToCommand" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToCommand() *CommandQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToCommand(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToCommand(ps) } // QueryProvisioningStepToDNSRecord queries the "ProvisioningStepToDNSRecord" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToDNSRecord() *DNSRecordQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToDNSRecord(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToDNSRecord(ps) } // QueryProvisioningStepToFileDelete queries the "ProvisioningStepToFileDelete" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToFileDelete() *FileDeleteQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToFileDelete(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToFileDelete(ps) } // QueryProvisioningStepToFileDownload queries the "ProvisioningStepToFileDownload" edge of the ProvisioningStep entity. 
func (ps *ProvisioningStep) QueryProvisioningStepToFileDownload() *FileDownloadQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToFileDownload(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToFileDownload(ps) } // QueryProvisioningStepToFileExtract queries the "ProvisioningStepToFileExtract" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToFileExtract() *FileExtractQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToFileExtract(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToFileExtract(ps) } // QueryProvisioningStepToAnsible queries the "ProvisioningStepToAnsible" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToAnsible() *AnsibleQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToAnsible(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToAnsible(ps) } // QueryProvisioningStepToPlan queries the "ProvisioningStepToPlan" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToPlan() *PlanQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToPlan(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToPlan(ps) } // QueryProvisioningStepToAgentTask queries the "ProvisioningStepToAgentTask" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToAgentTask() *AgentTaskQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToAgentTask(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToAgentTask(ps) } // QueryProvisioningStepToGinFileMiddleware queries the "ProvisioningStepToGinFileMiddleware" edge of the ProvisioningStep entity. func (ps *ProvisioningStep) QueryProvisioningStepToGinFileMiddleware() *GinFileMiddlewareQuery { - return (&ProvisioningStepClient{config: ps.config}).QueryProvisioningStepToGinFileMiddleware(ps) + return NewProvisioningStepClient(ps.config).QueryProvisioningStepToGinFileMiddleware(ps) } // Update returns a builder for updating this ProvisioningStep. // Note that you need to call ProvisioningStep.Unwrap() before calling this method if this ProvisioningStep // was returned from a transaction, and the transaction was committed or rolled back. func (ps *ProvisioningStep) Update() *ProvisioningStepUpdateOne { - return (&ProvisioningStepClient{config: ps.config}).UpdateOne(ps) + return NewProvisioningStepClient(ps.config).UpdateOne(ps) } // Unwrap unwraps the ProvisioningStep entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (ps *ProvisioningStep) Unwrap() *ProvisioningStep { - tx, ok := ps.config.driver.(*txDriver) + _tx, ok := ps.config.driver.(*txDriver) if !ok { panic("ent: ProvisioningStep is not a transactional entity") } - ps.config.driver = tx.drv + ps.config.driver = _tx.drv return ps } @@ -488,20 +492,39 @@ func (ps *ProvisioningStep) Unwrap() *ProvisioningStep { func (ps *ProvisioningStep) String() string { var builder strings.Builder builder.WriteString("ProvisioningStep(") - builder.WriteString(fmt.Sprintf("id=%v", ps.ID)) - builder.WriteString(", type=") + builder.WriteString(fmt.Sprintf("id=%v, ", ps.ID)) + builder.WriteString("type=") builder.WriteString(fmt.Sprintf("%v", ps.Type)) - builder.WriteString(", step_number=") + builder.WriteString(", ") + builder.WriteString("step_number=") builder.WriteString(fmt.Sprintf("%v", ps.StepNumber)) builder.WriteByte(')') return builder.String() } -// ProvisioningSteps is a parsable slice of ProvisioningStep. -type ProvisioningSteps []*ProvisioningStep +// NamedProvisioningStepToAgentTask returns the ProvisioningStepToAgentTask named value or an error if the edge was not +// loaded in eager-loading with this name. +func (ps *ProvisioningStep) NamedProvisioningStepToAgentTask(name string) ([]*AgentTask, error) { + if ps.Edges.namedProvisioningStepToAgentTask == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := ps.Edges.namedProvisioningStepToAgentTask[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (ps ProvisioningSteps) config(cfg config) { - for _i := range ps { - ps[_i].config = cfg +func (ps *ProvisioningStep) appendNamedProvisioningStepToAgentTask(name string, edges ...*AgentTask) { + if ps.Edges.namedProvisioningStepToAgentTask == nil { + ps.Edges.namedProvisioningStepToAgentTask = make(map[string][]*AgentTask) + } + if len(edges) == 0 { + ps.Edges.namedProvisioningStepToAgentTask[name] = []*AgentTask{} + } else { + ps.Edges.namedProvisioningStepToAgentTask[name] = append(ps.Edges.namedProvisioningStepToAgentTask[name], edges...) } } + +// ProvisioningSteps is a parsable slice of ProvisioningStep. +type ProvisioningSteps []*ProvisioningStep diff --git a/ent/provisioningstep/provisioningstep.go b/ent/provisioningstep/provisioningstep.go index 6359e33a..521ca457 100755 --- a/ent/provisioningstep/provisioningstep.go +++ b/ent/provisioningstep/provisioningstep.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package provisioningstep @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -201,19 +203,212 @@ func TypeValidator(_type Type) error { } } +// OrderOption defines the ordering options for the ProvisioningStep queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByStepNumber orders the results by the step_number field. +func ByStepNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStepNumber, opts...).ToFunc() +} + +// ByProvisioningStepToStatusField orders the results by ProvisioningStepToStatus field. 
+func ByProvisioningStepToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToProvisionedHostField orders the results by ProvisioningStepToProvisionedHost field. +func ByProvisioningStepToProvisionedHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToProvisionedHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToScriptField orders the results by ProvisioningStepToScript field. +func ByProvisioningStepToScriptField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToScriptStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToCommandField orders the results by ProvisioningStepToCommand field. +func ByProvisioningStepToCommandField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToCommandStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToDNSRecordField orders the results by ProvisioningStepToDNSRecord field. +func ByProvisioningStepToDNSRecordField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToDNSRecordStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToFileDeleteField orders the results by ProvisioningStepToFileDelete field. +func ByProvisioningStepToFileDeleteField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToFileDeleteStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToFileDownloadField orders the results by ProvisioningStepToFileDownload field. +func ByProvisioningStepToFileDownloadField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToFileDownloadStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToFileExtractField orders the results by ProvisioningStepToFileExtract field. +func ByProvisioningStepToFileExtractField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToFileExtractStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToAnsibleField orders the results by ProvisioningStepToAnsible field. +func ByProvisioningStepToAnsibleField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToAnsibleStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToPlanField orders the results by ProvisioningStepToPlan field. +func ByProvisioningStepToPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToPlanStep(), sql.OrderByField(field, opts...)) + } +} + +// ByProvisioningStepToAgentTaskCount orders the results by ProvisioningStepToAgentTask count. 
+func ByProvisioningStepToAgentTaskCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newProvisioningStepToAgentTaskStep(), opts...) + } +} + +// ByProvisioningStepToAgentTask orders the results by ProvisioningStepToAgentTask terms. +func ByProvisioningStepToAgentTask(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToAgentTaskStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByProvisioningStepToGinFileMiddlewareField orders the results by ProvisioningStepToGinFileMiddleware field. +func ByProvisioningStepToGinFileMiddlewareField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProvisioningStepToGinFileMiddlewareStep(), sql.OrderByField(field, opts...)) + } +} +func newProvisioningStepToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, ProvisioningStepToStatusTable, ProvisioningStepToStatusColumn), + ) +} +func newProvisioningStepToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToProvisionedHostTable, ProvisioningStepToProvisionedHostColumn), + ) +} +func newProvisioningStepToScriptStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToScriptInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToScriptTable, ProvisioningStepToScriptColumn), + ) +} +func newProvisioningStepToCommandStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToCommandInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToCommandTable, ProvisioningStepToCommandColumn), + ) +} +func newProvisioningStepToDNSRecordStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToDNSRecordInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToDNSRecordTable, ProvisioningStepToDNSRecordColumn), + ) +} +func newProvisioningStepToFileDeleteStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToFileDeleteInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileDeleteTable, ProvisioningStepToFileDeleteColumn), + ) +} +func newProvisioningStepToFileDownloadStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToFileDownloadInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileDownloadTable, ProvisioningStepToFileDownloadColumn), + ) +} +func newProvisioningStepToFileExtractStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToFileExtractInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileExtractTable, ProvisioningStepToFileExtractColumn), + ) +} +func newProvisioningStepToAnsibleStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToAnsibleInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, 
ProvisioningStepToAnsibleTable, ProvisioningStepToAnsibleColumn), + ) +} +func newProvisioningStepToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, ProvisioningStepToPlanTable, ProvisioningStepToPlanColumn), + ) +} +func newProvisioningStepToAgentTaskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToAgentTaskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, ProvisioningStepToAgentTaskTable, ProvisioningStepToAgentTaskColumn), + ) +} +func newProvisioningStepToGinFileMiddlewareStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProvisioningStepToGinFileMiddlewareInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, ProvisioningStepToGinFileMiddlewareTable, ProvisioningStepToGinFileMiddlewareColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (_type Type) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(_type.String())) +func (e Type) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (_type *Type) UnmarshalGQL(val interface{}) error { +func (e *Type) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *_type = Type(str) - if err := TypeValidator(*_type); err != nil { + *e = Type(str) + if err := TypeValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Type", str) } return nil diff --git a/ent/provisioningstep/where.go b/ent/provisioningstep/where.go index b9961070..6cea57f3 100755 --- a/ent/provisioningstep/where.go +++ b/ent/provisioningstep/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package provisioningstep @@ -11,216 +11,112 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.ProvisioningStep(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.ProvisioningStep(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.ProvisioningStep(sql.FieldLTE(FieldID, id)) } // StepNumber applies equality check predicate on the "step_number" field. It's identical to StepNumberEQ. func StepNumber(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldEQ(FieldStepNumber, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v Type) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.ProvisioningStep(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v Type) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.ProvisioningStep(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...Type) predicate.ProvisioningStep { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisioningStep(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.ProvisioningStep(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...Type) predicate.ProvisioningStep { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisioningStep(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.ProvisioningStep(sql.FieldNotIn(FieldType, vs...)) } // StepNumberEQ applies the EQ predicate on the "step_number" field. func StepNumberEQ(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldEQ(FieldStepNumber, v)) } // StepNumberNEQ applies the NEQ predicate on the "step_number" field. func StepNumberNEQ(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldNEQ(FieldStepNumber, v)) } // StepNumberIn applies the In predicate on the "step_number" field. func StepNumberIn(vs ...int) predicate.ProvisioningStep { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisioningStep(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldStepNumber), v...)) - }) + return predicate.ProvisioningStep(sql.FieldIn(FieldStepNumber, vs...)) } // StepNumberNotIn applies the NotIn predicate on the "step_number" field. func StepNumberNotIn(vs ...int) predicate.ProvisioningStep { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ProvisioningStep(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldStepNumber), v...)) - }) + return predicate.ProvisioningStep(sql.FieldNotIn(FieldStepNumber, vs...)) } // StepNumberGT applies the GT predicate on the "step_number" field. func StepNumberGT(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldGT(FieldStepNumber, v)) } // StepNumberGTE applies the GTE predicate on the "step_number" field. func StepNumberGTE(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldGTE(FieldStepNumber, v)) } // StepNumberLT applies the LT predicate on the "step_number" field. func StepNumberLT(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldLT(FieldStepNumber, v)) } // StepNumberLTE applies the LTE predicate on the "step_number" field. func StepNumberLTE(v int) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStepNumber), v)) - }) + return predicate.ProvisioningStep(sql.FieldLTE(FieldStepNumber, v)) } // HasProvisioningStepToStatus applies the HasEdge predicate on the "ProvisioningStepToStatus" edge. 
@@ -228,7 +124,6 @@ func HasProvisioningStepToStatus() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, ProvisioningStepToStatusTable, ProvisioningStepToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -238,11 +133,7 @@ func HasProvisioningStepToStatus() predicate.ProvisioningStep { // HasProvisioningStepToStatusWith applies the HasEdge predicate on the "ProvisioningStepToStatus" edge with a given conditions (other predicates). func HasProvisioningStepToStatusWith(preds ...predicate.Status) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, ProvisioningStepToStatusTable, ProvisioningStepToStatusColumn), - ) + step := newProvisioningStepToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -256,7 +147,6 @@ func HasProvisioningStepToProvisionedHost() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToProvisionedHostTable, ProvisioningStepToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -266,11 +156,7 @@ func HasProvisioningStepToProvisionedHost() predicate.ProvisioningStep { // HasProvisioningStepToProvisionedHostWith applies the HasEdge predicate on the "ProvisioningStepToProvisionedHost" edge with a given conditions (other predicates). func HasProvisioningStepToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToProvisionedHostTable, ProvisioningStepToProvisionedHostColumn), - ) + step := newProvisioningStepToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -284,7 +170,6 @@ func HasProvisioningStepToScript() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToScriptTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToScriptTable, ProvisioningStepToScriptColumn), ) sqlgraph.HasNeighbors(s, step) @@ -294,11 +179,7 @@ func HasProvisioningStepToScript() predicate.ProvisioningStep { // HasProvisioningStepToScriptWith applies the HasEdge predicate on the "ProvisioningStepToScript" edge with a given conditions (other predicates). 
func HasProvisioningStepToScriptWith(preds ...predicate.Script) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToScriptInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToScriptTable, ProvisioningStepToScriptColumn), - ) + step := newProvisioningStepToScriptStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -312,7 +193,6 @@ func HasProvisioningStepToCommand() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToCommandTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToCommandTable, ProvisioningStepToCommandColumn), ) sqlgraph.HasNeighbors(s, step) @@ -322,11 +202,7 @@ func HasProvisioningStepToCommand() predicate.ProvisioningStep { // HasProvisioningStepToCommandWith applies the HasEdge predicate on the "ProvisioningStepToCommand" edge with a given conditions (other predicates). func HasProvisioningStepToCommandWith(preds ...predicate.Command) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToCommandInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToCommandTable, ProvisioningStepToCommandColumn), - ) + step := newProvisioningStepToCommandStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -340,7 +216,6 @@ func HasProvisioningStepToDNSRecord() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToDNSRecordTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToDNSRecordTable, ProvisioningStepToDNSRecordColumn), ) sqlgraph.HasNeighbors(s, step) @@ -350,11 +225,7 @@ func HasProvisioningStepToDNSRecord() predicate.ProvisioningStep { // HasProvisioningStepToDNSRecordWith applies the HasEdge predicate on the "ProvisioningStepToDNSRecord" edge with a given conditions (other predicates). func HasProvisioningStepToDNSRecordWith(preds ...predicate.DNSRecord) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToDNSRecordInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToDNSRecordTable, ProvisioningStepToDNSRecordColumn), - ) + step := newProvisioningStepToDNSRecordStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -368,7 +239,6 @@ func HasProvisioningStepToFileDelete() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToFileDeleteTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileDeleteTable, ProvisioningStepToFileDeleteColumn), ) sqlgraph.HasNeighbors(s, step) @@ -378,11 +248,7 @@ func HasProvisioningStepToFileDelete() predicate.ProvisioningStep { // HasProvisioningStepToFileDeleteWith applies the HasEdge predicate on the "ProvisioningStepToFileDelete" edge with a given conditions (other predicates). 
func HasProvisioningStepToFileDeleteWith(preds ...predicate.FileDelete) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToFileDeleteInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileDeleteTable, ProvisioningStepToFileDeleteColumn), - ) + step := newProvisioningStepToFileDeleteStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -396,7 +262,6 @@ func HasProvisioningStepToFileDownload() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToFileDownloadTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileDownloadTable, ProvisioningStepToFileDownloadColumn), ) sqlgraph.HasNeighbors(s, step) @@ -406,11 +271,7 @@ func HasProvisioningStepToFileDownload() predicate.ProvisioningStep { // HasProvisioningStepToFileDownloadWith applies the HasEdge predicate on the "ProvisioningStepToFileDownload" edge with a given conditions (other predicates). func HasProvisioningStepToFileDownloadWith(preds ...predicate.FileDownload) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToFileDownloadInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileDownloadTable, ProvisioningStepToFileDownloadColumn), - ) + step := newProvisioningStepToFileDownloadStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -424,7 +285,6 @@ func HasProvisioningStepToFileExtract() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToFileExtractTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileExtractTable, ProvisioningStepToFileExtractColumn), ) sqlgraph.HasNeighbors(s, step) @@ -434,11 +294,7 @@ func HasProvisioningStepToFileExtract() predicate.ProvisioningStep { // HasProvisioningStepToFileExtractWith applies the HasEdge predicate on the "ProvisioningStepToFileExtract" edge with a given conditions (other predicates). 
func HasProvisioningStepToFileExtractWith(preds ...predicate.FileExtract) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToFileExtractInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToFileExtractTable, ProvisioningStepToFileExtractColumn), - ) + step := newProvisioningStepToFileExtractStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -452,7 +308,6 @@ func HasProvisioningStepToAnsible() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToAnsibleTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToAnsibleTable, ProvisioningStepToAnsibleColumn), ) sqlgraph.HasNeighbors(s, step) @@ -462,11 +317,7 @@ func HasProvisioningStepToAnsible() predicate.ProvisioningStep { // HasProvisioningStepToAnsibleWith applies the HasEdge predicate on the "ProvisioningStepToAnsible" edge with a given conditions (other predicates). func HasProvisioningStepToAnsibleWith(preds ...predicate.Ansible) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToAnsibleInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ProvisioningStepToAnsibleTable, ProvisioningStepToAnsibleColumn), - ) + step := newProvisioningStepToAnsibleStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -480,7 +331,6 @@ func HasProvisioningStepToPlan() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, ProvisioningStepToPlanTable, ProvisioningStepToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -490,11 +340,7 @@ func HasProvisioningStepToPlan() predicate.ProvisioningStep { // HasProvisioningStepToPlanWith applies the HasEdge predicate on the "ProvisioningStepToPlan" edge with a given conditions (other predicates). func HasProvisioningStepToPlanWith(preds ...predicate.Plan) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, ProvisioningStepToPlanTable, ProvisioningStepToPlanColumn), - ) + step := newProvisioningStepToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -508,7 +354,6 @@ func HasProvisioningStepToAgentTask() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToAgentTaskTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, ProvisioningStepToAgentTaskTable, ProvisioningStepToAgentTaskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -518,11 +363,7 @@ func HasProvisioningStepToAgentTask() predicate.ProvisioningStep { // HasProvisioningStepToAgentTaskWith applies the HasEdge predicate on the "ProvisioningStepToAgentTask" edge with a given conditions (other predicates). 
func HasProvisioningStepToAgentTaskWith(preds ...predicate.AgentTask) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToAgentTaskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, ProvisioningStepToAgentTaskTable, ProvisioningStepToAgentTaskColumn), - ) + step := newProvisioningStepToAgentTaskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -536,7 +377,6 @@ func HasProvisioningStepToGinFileMiddleware() predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToGinFileMiddlewareTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, ProvisioningStepToGinFileMiddlewareTable, ProvisioningStepToGinFileMiddlewareColumn), ) sqlgraph.HasNeighbors(s, step) @@ -546,11 +386,7 @@ func HasProvisioningStepToGinFileMiddleware() predicate.ProvisioningStep { // HasProvisioningStepToGinFileMiddlewareWith applies the HasEdge predicate on the "ProvisioningStepToGinFileMiddleware" edge with a given conditions (other predicates). func HasProvisioningStepToGinFileMiddlewareWith(preds ...predicate.GinFileMiddleware) predicate.ProvisioningStep { return predicate.ProvisioningStep(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ProvisioningStepToGinFileMiddlewareInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, ProvisioningStepToGinFileMiddlewareTable, ProvisioningStepToGinFileMiddlewareColumn), - ) + step := newProvisioningStepToGinFileMiddlewareStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -561,32 +397,15 @@ func HasProvisioningStepToGinFileMiddlewareWith(preds ...predicate.GinFileMiddle // And groups predicates with the AND operator between them. func And(predicates ...predicate.ProvisioningStep) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ProvisioningStep(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.ProvisioningStep) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ProvisioningStep(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.ProvisioningStep) predicate.ProvisioningStep { - return predicate.ProvisioningStep(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ProvisioningStep(sql.NotPredicates(p)) } diff --git a/ent/provisioningstep_create.go b/ent/provisioningstep_create.go index b97c0c11..d95e7927 100755 --- a/ent/provisioningstep_create.go +++ b/ent/provisioningstep_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -289,44 +289,8 @@ func (psc *ProvisioningStepCreate) Mutation() *ProvisioningStepMutation { // Save creates the ProvisioningStep in the database. 
func (psc *ProvisioningStepCreate) Save(ctx context.Context) (*ProvisioningStep, error) { - var ( - err error - node *ProvisioningStep - ) psc.defaults() - if len(psc.hooks) == 0 { - if err = psc.check(); err != nil { - return nil, err - } - node, err = psc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisioningStepMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = psc.check(); err != nil { - return nil, err - } - psc.mutation = mutation - if node, err = psc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(psc.hooks) - 1; i >= 0; i-- { - if psc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = psc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, psc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, psc.sqlSave, psc.mutation, psc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -376,10 +340,13 @@ func (psc *ProvisioningStepCreate) check() error { } func (psc *ProvisioningStepCreate) sqlSave(ctx context.Context) (*ProvisioningStep, error) { + if err := psc.check(); err != nil { + return nil, err + } _node, _spec := psc.createSpec() if err := sqlgraph.CreateNode(ctx, psc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -390,38 +357,26 @@ func (psc *ProvisioningStepCreate) sqlSave(ctx context.Context) (*ProvisioningSt return nil, err } } + psc.mutation.id = &_node.ID + psc.mutation.done = true return _node, nil } func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.CreateSpec) { var ( _node = &ProvisioningStep{config: psc.config} - _spec = &sqlgraph.CreateSpec{ - Table: provisioningstep.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(provisioningstep.Table, sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID)) ) if id, ok := psc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := psc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: provisioningstep.FieldType, - }) + _spec.SetField(provisioningstep.FieldType, field.TypeEnum, value) _node.Type = value } if value, ok := psc.mutation.StepNumber(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: provisioningstep.FieldStepNumber, - }) + _spec.SetField(provisioningstep.FieldStepNumber, field.TypeInt, value) _node.StepNumber = value } if nodes := psc.mutation.ProvisioningStepToStatusIDs(); len(nodes) > 0 { @@ -432,10 +387,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -451,10 +403,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToProvisionedHostColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -471,10 +420,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -491,10 +437,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -511,10 +454,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -531,10 +471,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -551,10 +488,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -571,10 +505,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -591,10 +522,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -611,10 +539,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -631,10 +556,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: 
[]string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -650,10 +572,7 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr Columns: []string{provisioningstep.ProvisioningStepToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -668,11 +587,15 @@ func (psc *ProvisioningStepCreate) createSpec() (*ProvisioningStep, *sqlgraph.Cr // ProvisioningStepCreateBulk is the builder for creating many ProvisioningStep entities in bulk. type ProvisioningStepCreateBulk struct { config + err error builders []*ProvisioningStepCreate } // Save creates the ProvisioningStep entities in the database. func (pscb *ProvisioningStepCreateBulk) Save(ctx context.Context) ([]*ProvisioningStep, error) { + if pscb.err != nil { + return nil, pscb.err + } specs := make([]*sqlgraph.CreateSpec, len(pscb.builders)) nodes := make([]*ProvisioningStep, len(pscb.builders)) mutators := make([]Mutator, len(pscb.builders)) @@ -689,8 +612,8 @@ func (pscb *ProvisioningStepCreateBulk) Save(ctx context.Context) ([]*Provisioni return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, pscb.builders[i+1].mutation) } else { @@ -698,7 +621,7 @@ func (pscb *ProvisioningStepCreateBulk) Save(ctx context.Context) ([]*Provisioni // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, pscb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/provisioningstep_delete.go b/ent/provisioningstep_delete.go index f3477825..0a61fc62 100755 --- a/ent/provisioningstep_delete.go +++ b/ent/provisioningstep_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (psd *ProvisioningStepDelete) Where(ps ...predicate.ProvisioningStep) *Prov // Exec executes the deletion query and returns how many vertices were deleted. 
func (psd *ProvisioningStepDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(psd.hooks) == 0 { - affected, err = psd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisioningStepMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - psd.mutation = mutation - affected, err = psd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(psd.hooks) - 1; i >= 0; i-- { - if psd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = psd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, psd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, psd.sqlExec, psd.mutation, psd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (psd *ProvisioningStepDelete) ExecX(ctx context.Context) int { } func (psd *ProvisioningStepDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisioningstep.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(provisioningstep.Table, sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID)) if ps := psd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (psd *ProvisioningStepDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, psd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, psd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + psd.mutation.done = true + return affected, err } // ProvisioningStepDeleteOne is the builder for deleting a single ProvisioningStep entity. @@ -92,6 +61,12 @@ type ProvisioningStepDeleteOne struct { psd *ProvisioningStepDelete } +// Where appends a list predicates to the ProvisioningStepDelete builder. +func (psdo *ProvisioningStepDeleteOne) Where(ps ...predicate.ProvisioningStep) *ProvisioningStepDeleteOne { + psdo.psd.mutation.Where(ps...) + return psdo +} + // Exec executes the deletion query. func (psdo *ProvisioningStepDeleteOne) Exec(ctx context.Context) error { n, err := psdo.psd.Exec(ctx) @@ -107,5 +82,7 @@ func (psdo *ProvisioningStepDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (psdo *ProvisioningStepDeleteOne) ExecX(ctx context.Context) { - psdo.psd.ExecX(ctx) + if err := psdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/provisioningstep_query.go b/ent/provisioningstep_query.go index 6e938e06..bc714d33 100755 --- a/ent/provisioningstep_query.go +++ b/ent/provisioningstep_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -32,13 +31,10 @@ import ( // ProvisioningStepQuery is the builder for querying ProvisioningStep entities. type ProvisioningStepQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.ProvisioningStep - // eager-loading edges. 
+ ctx *QueryContext + order []provisioningstep.OrderOption + inters []Interceptor + predicates []predicate.ProvisioningStep withProvisioningStepToStatus *StatusQuery withProvisioningStepToProvisionedHost *ProvisionedHostQuery withProvisioningStepToScript *ScriptQuery @@ -52,6 +48,9 @@ type ProvisioningStepQuery struct { withProvisioningStepToAgentTask *AgentTaskQuery withProvisioningStepToGinFileMiddleware *GinFileMiddlewareQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*ProvisioningStep) error + withNamedProvisioningStepToAgentTask map[string]*AgentTaskQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -63,34 +62,34 @@ func (psq *ProvisioningStepQuery) Where(ps ...predicate.ProvisioningStep) *Provi return psq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (psq *ProvisioningStepQuery) Limit(limit int) *ProvisioningStepQuery { - psq.limit = &limit + psq.ctx.Limit = &limit return psq } -// Offset adds an offset step to the query. +// Offset to start from. func (psq *ProvisioningStepQuery) Offset(offset int) *ProvisioningStepQuery { - psq.offset = &offset + psq.ctx.Offset = &offset return psq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (psq *ProvisioningStepQuery) Unique(unique bool) *ProvisioningStepQuery { - psq.unique = &unique + psq.ctx.Unique = &unique return psq } -// Order adds an order step to the query. -func (psq *ProvisioningStepQuery) Order(o ...OrderFunc) *ProvisioningStepQuery { +// Order specifies how the records should be ordered. +func (psq *ProvisioningStepQuery) Order(o ...provisioningstep.OrderOption) *ProvisioningStepQuery { psq.order = append(psq.order, o...) return psq } // QueryProvisioningStepToStatus chains the current query on the "ProvisioningStepToStatus" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToStatus() *StatusQuery { - query := &StatusQuery{config: psq.config} + query := (&StatusClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -112,7 +111,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToStatus() *StatusQuery { // QueryProvisioningStepToProvisionedHost chains the current query on the "ProvisioningStepToProvisionedHost" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: psq.config} + query := (&ProvisionedHostClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -134,7 +133,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToProvisionedHost() *Prov // QueryProvisioningStepToScript chains the current query on the "ProvisioningStepToScript" edge. 
func (psq *ProvisioningStepQuery) QueryProvisioningStepToScript() *ScriptQuery { - query := &ScriptQuery{config: psq.config} + query := (&ScriptClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -156,7 +155,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToScript() *ScriptQuery { // QueryProvisioningStepToCommand chains the current query on the "ProvisioningStepToCommand" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToCommand() *CommandQuery { - query := &CommandQuery{config: psq.config} + query := (&CommandClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -178,7 +177,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToCommand() *CommandQuery // QueryProvisioningStepToDNSRecord chains the current query on the "ProvisioningStepToDNSRecord" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToDNSRecord() *DNSRecordQuery { - query := &DNSRecordQuery{config: psq.config} + query := (&DNSRecordClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -200,7 +199,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToDNSRecord() *DNSRecordQ // QueryProvisioningStepToFileDelete chains the current query on the "ProvisioningStepToFileDelete" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToFileDelete() *FileDeleteQuery { - query := &FileDeleteQuery{config: psq.config} + query := (&FileDeleteClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -222,7 +221,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToFileDelete() *FileDelet // QueryProvisioningStepToFileDownload chains the current query on the "ProvisioningStepToFileDownload" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToFileDownload() *FileDownloadQuery { - query := &FileDownloadQuery{config: psq.config} + query := (&FileDownloadClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -244,7 +243,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToFileDownload() *FileDow // QueryProvisioningStepToFileExtract chains the current query on the "ProvisioningStepToFileExtract" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToFileExtract() *FileExtractQuery { - query := &FileExtractQuery{config: psq.config} + query := (&FileExtractClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -266,7 +265,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToFileExtract() *FileExtr // QueryProvisioningStepToAnsible chains the current query on the "ProvisioningStepToAnsible" edge. 
func (psq *ProvisioningStepQuery) QueryProvisioningStepToAnsible() *AnsibleQuery { - query := &AnsibleQuery{config: psq.config} + query := (&AnsibleClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -288,7 +287,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToAnsible() *AnsibleQuery // QueryProvisioningStepToPlan chains the current query on the "ProvisioningStepToPlan" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToPlan() *PlanQuery { - query := &PlanQuery{config: psq.config} + query := (&PlanClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -310,7 +309,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToPlan() *PlanQuery { // QueryProvisioningStepToAgentTask chains the current query on the "ProvisioningStepToAgentTask" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToAgentTask() *AgentTaskQuery { - query := &AgentTaskQuery{config: psq.config} + query := (&AgentTaskClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -332,7 +331,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToAgentTask() *AgentTaskQ // QueryProvisioningStepToGinFileMiddleware chains the current query on the "ProvisioningStepToGinFileMiddleware" edge. func (psq *ProvisioningStepQuery) QueryProvisioningStepToGinFileMiddleware() *GinFileMiddlewareQuery { - query := &GinFileMiddlewareQuery{config: psq.config} + query := (&GinFileMiddlewareClient{config: psq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := psq.prepareQuery(ctx); err != nil { return nil, err @@ -355,7 +354,7 @@ func (psq *ProvisioningStepQuery) QueryProvisioningStepToGinFileMiddleware() *Gi // First returns the first ProvisioningStep entity from the query. // Returns a *NotFoundError when no ProvisioningStep was found. func (psq *ProvisioningStepQuery) First(ctx context.Context) (*ProvisioningStep, error) { - nodes, err := psq.Limit(1).All(ctx) + nodes, err := psq.Limit(1).All(setContextOp(ctx, psq.ctx, "First")) if err != nil { return nil, err } @@ -378,7 +377,7 @@ func (psq *ProvisioningStepQuery) FirstX(ctx context.Context) *ProvisioningStep // Returns a *NotFoundError when no ProvisioningStep ID was found. func (psq *ProvisioningStepQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = psq.Limit(1).IDs(ctx); err != nil { + if ids, err = psq.Limit(1).IDs(setContextOp(ctx, psq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -401,7 +400,7 @@ func (psq *ProvisioningStepQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one ProvisioningStep entity is found. // Returns a *NotFoundError when no ProvisioningStep entities are found. func (psq *ProvisioningStepQuery) Only(ctx context.Context) (*ProvisioningStep, error) { - nodes, err := psq.Limit(2).All(ctx) + nodes, err := psq.Limit(2).All(setContextOp(ctx, psq.ctx, "Only")) if err != nil { return nil, err } @@ -429,7 +428,7 @@ func (psq *ProvisioningStepQuery) OnlyX(ctx context.Context) *ProvisioningStep { // Returns a *NotFoundError when no entities are found. 
func (psq *ProvisioningStepQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = psq.Limit(2).IDs(ctx); err != nil { + if ids, err = psq.Limit(2).IDs(setContextOp(ctx, psq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -454,10 +453,12 @@ func (psq *ProvisioningStepQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of ProvisioningSteps. func (psq *ProvisioningStepQuery) All(ctx context.Context) ([]*ProvisioningStep, error) { + ctx = setContextOp(ctx, psq.ctx, "All") if err := psq.prepareQuery(ctx); err != nil { return nil, err } - return psq.sqlAll(ctx) + qr := querierAll[[]*ProvisioningStep, *ProvisioningStepQuery]() + return withInterceptors[[]*ProvisioningStep](ctx, psq, qr, psq.inters) } // AllX is like All, but panics if an error occurs. @@ -470,9 +471,12 @@ func (psq *ProvisioningStepQuery) AllX(ctx context.Context) []*ProvisioningStep } // IDs executes the query and returns a list of ProvisioningStep IDs. -func (psq *ProvisioningStepQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := psq.Select(provisioningstep.FieldID).Scan(ctx, &ids); err != nil { +func (psq *ProvisioningStepQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if psq.ctx.Unique == nil && psq.path != nil { + psq.Unique(true) + } + ctx = setContextOp(ctx, psq.ctx, "IDs") + if err = psq.Select(provisioningstep.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -489,10 +493,11 @@ func (psq *ProvisioningStepQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (psq *ProvisioningStepQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, psq.ctx, "Count") if err := psq.prepareQuery(ctx); err != nil { return 0, err } - return psq.sqlCount(ctx) + return withInterceptors[int](ctx, psq, querierCount[*ProvisioningStepQuery](), psq.inters) } // CountX is like Count, but panics if an error occurs. @@ -506,10 +511,15 @@ func (psq *ProvisioningStepQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (psq *ProvisioningStepQuery) Exist(ctx context.Context) (bool, error) { - if err := psq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, psq.ctx, "Exist") + switch _, err := psq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return psq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -529,9 +539,9 @@ func (psq *ProvisioningStepQuery) Clone() *ProvisioningStepQuery { } return &ProvisioningStepQuery{ config: psq.config, - limit: psq.limit, - offset: psq.offset, - order: append([]OrderFunc{}, psq.order...), + ctx: psq.ctx.Clone(), + order: append([]provisioningstep.OrderOption{}, psq.order...), + inters: append([]Interceptor{}, psq.inters...), predicates: append([]predicate.ProvisioningStep{}, psq.predicates...), withProvisioningStepToStatus: psq.withProvisioningStepToStatus.Clone(), withProvisioningStepToProvisionedHost: psq.withProvisioningStepToProvisionedHost.Clone(), @@ -546,16 +556,15 @@ func (psq *ProvisioningStepQuery) Clone() *ProvisioningStepQuery { withProvisioningStepToAgentTask: psq.withProvisioningStepToAgentTask.Clone(), withProvisioningStepToGinFileMiddleware: psq.withProvisioningStepToGinFileMiddleware.Clone(), // clone intermediate query. 
- sql: psq.sql.Clone(), - path: psq.path, - unique: psq.unique, + sql: psq.sql.Clone(), + path: psq.path, } } // WithProvisioningStepToStatus tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToStatus(opts ...func(*StatusQuery)) *ProvisioningStepQuery { - query := &StatusQuery{config: psq.config} + query := (&StatusClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -566,7 +575,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToStatus(opts ...func(*Sta // WithProvisioningStepToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToProvisionedHost" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *ProvisioningStepQuery { - query := &ProvisionedHostQuery{config: psq.config} + query := (&ProvisionedHostClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -577,7 +586,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToProvisionedHost(opts ... // WithProvisioningStepToScript tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToScript" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToScript(opts ...func(*ScriptQuery)) *ProvisioningStepQuery { - query := &ScriptQuery{config: psq.config} + query := (&ScriptClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -588,7 +597,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToScript(opts ...func(*Scr // WithProvisioningStepToCommand tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToCommand" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToCommand(opts ...func(*CommandQuery)) *ProvisioningStepQuery { - query := &CommandQuery{config: psq.config} + query := (&CommandClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -599,7 +608,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToCommand(opts ...func(*Co // WithProvisioningStepToDNSRecord tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToDNSRecord" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToDNSRecord(opts ...func(*DNSRecordQuery)) *ProvisioningStepQuery { - query := &DNSRecordQuery{config: psq.config} + query := (&DNSRecordClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -610,7 +619,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToDNSRecord(opts ...func(* // WithProvisioningStepToFileDelete tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToFileDelete" edge. The optional arguments are used to configure the query builder of the edge. 
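// An illustrative eager-loading call for the With* helpers in this section
// (a sketch only; the client value and ctx are assumed, and the nested
// query option simply limits the loaded edge):
//
//	steps, err := client.ProvisioningStep.Query().
//		WithProvisioningStepToFileDelete(func(q *FileDeleteQuery) {
//			q.Limit(1)
//		}).
//		All(ctx)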
func (psq *ProvisioningStepQuery) WithProvisioningStepToFileDelete(opts ...func(*FileDeleteQuery)) *ProvisioningStepQuery { - query := &FileDeleteQuery{config: psq.config} + query := (&FileDeleteClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -621,7 +630,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToFileDelete(opts ...func( // WithProvisioningStepToFileDownload tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToFileDownload" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToFileDownload(opts ...func(*FileDownloadQuery)) *ProvisioningStepQuery { - query := &FileDownloadQuery{config: psq.config} + query := (&FileDownloadClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -632,7 +641,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToFileDownload(opts ...fun // WithProvisioningStepToFileExtract tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToFileExtract" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToFileExtract(opts ...func(*FileExtractQuery)) *ProvisioningStepQuery { - query := &FileExtractQuery{config: psq.config} + query := (&FileExtractClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -643,7 +652,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToFileExtract(opts ...func // WithProvisioningStepToAnsible tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToAnsible" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToAnsible(opts ...func(*AnsibleQuery)) *ProvisioningStepQuery { - query := &AnsibleQuery{config: psq.config} + query := (&AnsibleClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -654,7 +663,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToAnsible(opts ...func(*An // WithProvisioningStepToPlan tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToPlan(opts ...func(*PlanQuery)) *ProvisioningStepQuery { - query := &PlanQuery{config: psq.config} + query := (&PlanClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -665,7 +674,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToPlan(opts ...func(*PlanQ // WithProvisioningStepToAgentTask tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToAgentTask" edge. The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToAgentTask(opts ...func(*AgentTaskQuery)) *ProvisioningStepQuery { - query := &AgentTaskQuery{config: psq.config} + query := (&AgentTaskClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -676,7 +685,7 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToAgentTask(opts ...func(* // WithProvisioningStepToGinFileMiddleware tells the query-builder to eager-load the nodes that are connected to // the "ProvisioningStepToGinFileMiddleware" edge. 
The optional arguments are used to configure the query builder of the edge. func (psq *ProvisioningStepQuery) WithProvisioningStepToGinFileMiddleware(opts ...func(*GinFileMiddlewareQuery)) *ProvisioningStepQuery { - query := &GinFileMiddlewareQuery{config: psq.config} + query := (&GinFileMiddlewareClient{config: psq.config}).Query() for _, opt := range opts { opt(query) } @@ -698,17 +707,13 @@ func (psq *ProvisioningStepQuery) WithProvisioningStepToGinFileMiddleware(opts . // GroupBy(provisioningstep.FieldType). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (psq *ProvisioningStepQuery) GroupBy(field string, fields ...string) *ProvisioningStepGroupBy { - group := &ProvisioningStepGroupBy{config: psq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := psq.prepareQuery(ctx); err != nil { - return nil, err - } - return psq.sqlQuery(ctx), nil - } - return group + psq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProvisioningStepGroupBy{build: psq} + grbuild.flds = &psq.ctx.Fields + grbuild.label = provisioningstep.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -723,14 +728,31 @@ func (psq *ProvisioningStepQuery) GroupBy(field string, fields ...string) *Provi // client.ProvisioningStep.Query(). // Select(provisioningstep.FieldType). // Scan(ctx, &v) -// func (psq *ProvisioningStepQuery) Select(fields ...string) *ProvisioningStepSelect { - psq.fields = append(psq.fields, fields...) - return &ProvisioningStepSelect{ProvisioningStepQuery: psq} + psq.ctx.Fields = append(psq.ctx.Fields, fields...) + sbuild := &ProvisioningStepSelect{ProvisioningStepQuery: psq} + sbuild.label = provisioningstep.Label + sbuild.flds, sbuild.scan = &psq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ProvisioningStepSelect configured with the given aggregations. +func (psq *ProvisioningStepQuery) Aggregate(fns ...AggregateFunc) *ProvisioningStepSelect { + return psq.Select().Aggregate(fns...) } func (psq *ProvisioningStepQuery) prepareQuery(ctx context.Context) error { - for _, f := range psq.fields { + for _, inter := range psq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, psq); err != nil { + return err + } + } + } + for _, f := range psq.ctx.Fields { if !provisioningstep.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -745,7 +767,7 @@ func (psq *ProvisioningStepQuery) prepareQuery(ctx context.Context) error { return nil } -func (psq *ProvisioningStepQuery) sqlAll(ctx context.Context) ([]*ProvisioningStep, error) { +func (psq *ProvisioningStepQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ProvisioningStep, error) { var ( nodes = []*ProvisioningStep{} withFKs = psq.withFKs @@ -771,954 +793,678 @@ func (psq *ProvisioningStepQuery) sqlAll(ctx context.Context) ([]*ProvisioningSt if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, provisioningstep.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ProvisioningStep).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &ProvisioningStep{config: psq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(psq.modifiers) > 0 { + _spec.Modifiers = psq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, psq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := psq.withProvisioningStepToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisioningStep) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(provisioningstep.ProvisioningStepToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToStatus(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *Status) { n.Edges.ProvisioningStepToStatus = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.provisioning_step_provisioning_step_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioning_step_provisioning_step_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisioningStepToStatus = n - } } - if query := psq.withProvisioningStepToProvisionedHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_provisioned_host == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_provisioned_host - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(provisionedhost.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToProvisionedHost(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *ProvisionedHost) { n.Edges.ProvisioningStepToProvisionedHost = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_provisioned_host" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToProvisionedHost = n - } - } } - if query := psq.withProvisioningStepToScript; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_script == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_script - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(script.IDIn(ids...)) - neighbors, err 
:= query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToScript(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *Script) { n.Edges.ProvisioningStepToScript = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_script" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToScript = n - } - } } - if query := psq.withProvisioningStepToCommand; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_command == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_command - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(command.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToCommand(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *Command) { n.Edges.ProvisioningStepToCommand = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_command" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToCommand = n - } - } } - if query := psq.withProvisioningStepToDNSRecord; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_dns_record == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_dns_record - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(dnsrecord.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToDNSRecord(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *DNSRecord) { n.Edges.ProvisioningStepToDNSRecord = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_dns_record" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToDNSRecord = n - } - } } - if query := psq.withProvisioningStepToFileDelete; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_file_delete == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_file_delete - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(filedelete.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToFileDelete(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *FileDelete) { n.Edges.ProvisioningStepToFileDelete = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_file_delete" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToFileDelete = n - } - } } - if query := 
psq.withProvisioningStepToFileDownload; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_file_download == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_file_download - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(filedownload.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToFileDownload(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *FileDownload) { n.Edges.ProvisioningStepToFileDownload = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_file_download" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToFileDownload = n - } - } } - if query := psq.withProvisioningStepToFileExtract; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_file_extract == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_file_extract - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(fileextract.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToFileExtract(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *FileExtract) { n.Edges.ProvisioningStepToFileExtract = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_file_extract" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToFileExtract = n - } - } } - if query := psq.withProvisioningStepToAnsible; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_ansible == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_ansible - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(ansible.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToAnsible(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *Ansible) { n.Edges.ProvisioningStepToAnsible = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_ansible" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToAnsible = n - } - } } - if query := psq.withProvisioningStepToPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].plan_plan_to_provisioning_step == nil { - continue - } - fk := *nodes[i].plan_plan_to_provisioning_step - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { 
+ if err := psq.loadProvisioningStepToPlan(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *Plan) { n.Edges.ProvisioningStepToPlan = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioning_step" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToPlan = n - } - } } - if query := psq.withProvisioningStepToAgentTask; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ProvisioningStep) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ProvisioningStepToAgentTask = []*AgentTask{} - } - query.withFKs = true - query.Where(predicate.AgentTask(func(s *sql.Selector) { - s.Where(sql.InValues(provisioningstep.ProvisioningStepToAgentTaskColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + if err := psq.loadProvisioningStepToAgentTask(ctx, query, nodes, + func(n *ProvisioningStep) { n.Edges.ProvisioningStepToAgentTask = []*AgentTask{} }, + func(n *ProvisioningStep, e *AgentTask) { + n.Edges.ProvisioningStepToAgentTask = append(n.Edges.ProvisioningStepToAgentTask, e) + }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.agent_task_agent_task_to_provisioning_step - if fk == nil { - return nil, fmt.Errorf(`foreign-key "agent_task_agent_task_to_provisioning_step" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "agent_task_agent_task_to_provisioning_step" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ProvisioningStepToAgentTask = append(node.Edges.ProvisioningStepToAgentTask, n) - } } - if query := psq.withProvisioningStepToGinFileMiddleware; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ProvisioningStep) - for i := range nodes { - if nodes[i].gin_file_middleware_gin_file_middleware_to_provisioning_step == nil { - continue - } - fk := *nodes[i].gin_file_middleware_gin_file_middleware_to_provisioning_step - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := psq.loadProvisioningStepToGinFileMiddleware(ctx, query, nodes, nil, + func(n *ProvisioningStep, e *GinFileMiddleware) { n.Edges.ProvisioningStepToGinFileMiddleware = e }); err != nil { + return nil, err } - query.Where(ginfilemiddleware.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range psq.withNamedProvisioningStepToAgentTask { + if err := psq.loadProvisioningStepToAgentTask(ctx, query, nodes, + func(n *ProvisioningStep) { n.appendNamedProvisioningStepToAgentTask(name) }, + func(n *ProvisioningStep, e *AgentTask) { n.appendNamedProvisioningStepToAgentTask(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "gin_file_middleware_gin_file_middleware_to_provisioning_step" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ProvisioningStepToGinFileMiddleware = n - } + } + for i := range psq.loadTotal { + if err := psq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (psq *ProvisioningStepQuery) sqlCount(ctx context.Context) (int, error) { - _spec := psq.querySpec() - _spec.Node.Columns = psq.fields - if len(psq.fields) > 0 { - _spec.Unique = 
psq.unique != nil && *psq.unique +func (psq *ProvisioningStepQuery) loadProvisioningStepToStatus(ctx context.Context, query *StatusQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisioningStep) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] } - return sqlgraph.CountNodes(ctx, psq.driver, _spec) -} - -func (psq *ProvisioningStepQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := psq.sqlCount(ctx) + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisioningstep.ProvisioningStepToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) - } - return n > 0, nil -} - -func (psq *ProvisioningStepQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisioningstep.Table, - Columns: provisioningstep.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, - }, - From: psq.sql, - Unique: true, - } - if unique := psq.unique; unique != nil { - _spec.Unique = *unique + return err } - if fields := psq.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, provisioningstep.FieldID) - for i := range fields { - if fields[i] != provisioningstep.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } + for _, n := range neighbors { + fk := n.provisioning_step_provisioning_step_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "provisioning_step_provisioning_step_to_status" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioning_step_provisioning_step_to_status" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - if ps := psq.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } + return nil +} +func (psq *ProvisioningStepQuery) loadProvisioningStepToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *ProvisionedHost)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_provisioned_host == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_provisioned_host + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - if limit := psq.limit; limit != nil { - _spec.Limit = *limit + if len(ids) == 0 { + return nil } - if offset := psq.offset; offset != nil { - _spec.Offset = *offset + query.Where(provisionedhost.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err } - if ps := psq.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_provisioned_host" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) } } - return _spec + return nil } - -func (psq 
*ProvisioningStepQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(psq.driver.Dialect()) - t1 := builder.Table(provisioningstep.Table) - columns := psq.fields - if len(columns) == 0 { - columns = provisioningstep.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if psq.sql != nil { - selector = psq.sql - selector.Select(selector.Columns(columns...)...) +func (psq *ProvisioningStepQuery) loadProvisioningStepToScript(ctx context.Context, query *ScriptQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *Script)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_script == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_script + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - if psq.unique != nil && *psq.unique { - selector.Distinct() + if len(ids) == 0 { + return nil } - for _, p := range psq.predicates { - p(selector) + query.Where(script.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err } - for _, p := range psq.order { - p(selector) + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_script" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - if offset := psq.offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) + return nil +} +func (psq *ProvisioningStepQuery) loadProvisioningStepToCommand(ctx context.Context, query *CommandQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *Command)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_command == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_command + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - if limit := psq.limit; limit != nil { - selector.Limit(*limit) + if len(ids) == 0 { + return nil } - return selector -} - -// ProvisioningStepGroupBy is the group-by builder for ProvisioningStep entities. -type ProvisioningStepGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (psgb *ProvisioningStepGroupBy) Aggregate(fns ...AggregateFunc) *ProvisioningStepGroupBy { - psgb.fns = append(psgb.fns, fns...) - return psgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (psgb *ProvisioningStepGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := psgb.path(ctx) + query.Where(command.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { return err } - psgb.sql = query - return psgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. 
-func (psgb *ProvisioningStepGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := psgb.Scan(ctx, v); err != nil { - panic(err) + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_command" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } + return nil } - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (psgb *ProvisioningStepGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(psgb.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepGroupBy.Strings is not achievable when grouping more than 1 field") +func (psq *ProvisioningStepQuery) loadProvisioningStepToDNSRecord(ctx context.Context, query *DNSRecordQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *DNSRecord)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_dns_record == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_dns_record + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - var v []string - if err := psgb.Scan(ctx, &v); err != nil { - return nil, err + if len(ids) == 0 { + return nil } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (psgb *ProvisioningStepGroupBy) StringsX(ctx context.Context) []string { - v, err := psgb.Strings(ctx) + query.Where(dnsrecord.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_dns_record" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - return v + return nil } - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (psgb *ProvisioningStepGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = psgb.Strings(ctx); err != nil { - return +func (psq *ProvisioningStepQuery) loadProvisioningStepToFileDelete(ctx context.Context, query *FileDeleteQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *FileDelete)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_file_delete == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_file_delete + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepGroupBy.Strings returned %d results when one was expected", len(v)) + if len(ids) == 0 { + return nil } - return -} - -// StringX is like String, but panics if an error occurs. 
-func (psgb *ProvisioningStepGroupBy) StringX(ctx context.Context) string { - v, err := psgb.String(ctx) + query.Where(filedelete.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (psgb *ProvisioningStepGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(psgb.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := psgb.Scan(ctx, &v); err != nil { - return nil, err + return err } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (psgb *ProvisioningStepGroupBy) IntsX(ctx context.Context) []int { - v, err := psgb.Ints(ctx) - if err != nil { - panic(err) + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_file_delete" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - return v + return nil } - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (psgb *ProvisioningStepGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = psgb.Ints(ctx); err != nil { - return +func (psq *ProvisioningStepQuery) loadProvisioningStepToFileDownload(ctx context.Context, query *FileDownloadQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *FileDownload)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_file_download == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_file_download + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepGroupBy.Ints returned %d results when one was expected", len(v)) + if len(ids) == 0 { + return nil } - return -} - -// IntX is like Int, but panics if an error occurs. -func (psgb *ProvisioningStepGroupBy) IntX(ctx context.Context) int { - v, err := psgb.Int(ctx) + query.Where(filedownload.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_file_download" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (psgb *ProvisioningStepGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(psgb.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepGroupBy.Float64s is not achievable when grouping more than 1 field") +func (psq *ProvisioningStepQuery) loadProvisioningStepToFileExtract(ctx context.Context, query *FileExtractQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *FileExtract)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_file_extract == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_file_extract + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - var v []float64 - if err := psgb.Scan(ctx, &v); err != nil { - return nil, err + if len(ids) == 0 { + return nil } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (psgb *ProvisioningStepGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := psgb.Float64s(ctx) + query.Where(fileextract.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err } - return v + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_file_extract" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (psgb *ProvisioningStepGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = psgb.Float64s(ctx); err != nil { - return +func (psq *ProvisioningStepQuery) loadProvisioningStepToAnsible(ctx context.Context, query *AnsibleQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *Ansible)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_ansible == nil { + continue + } + fk := *nodes[i].provisioning_step_provisioning_step_to_ansible + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepGroupBy.Float64s returned %d results when one was expected", len(v)) + if len(ids) == 0 { + return nil } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (psgb *ProvisioningStepGroupBy) Float64X(ctx context.Context) float64 { - v, err := psgb.Float64(ctx) + query.Where(ansible.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_ansible" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - return v + return nil } - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (psgb *ProvisioningStepGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(psgb.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepGroupBy.Bools is not achievable when grouping more than 1 field") +func (psq *ProvisioningStepQuery) loadProvisioningStepToPlan(ctx context.Context, query *PlanQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].plan_plan_to_provisioning_step == nil { + continue + } + fk := *nodes[i].plan_plan_to_provisioning_step + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - var v []bool - if err := psgb.Scan(ctx, &v); err != nil { - return nil, err + if len(ids) == 0 { + return nil } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (psgb *ProvisioningStepGroupBy) BoolsX(ctx context.Context) []bool { - v, err := psgb.Bools(ctx) + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (psgb *ProvisioningStepGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = psgb.Bools(ctx); err != nil { - return + return err } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepGroupBy.Bools returned %d results when one was expected", len(v)) + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_plan_to_provisioning_step" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } } - return + return nil } - -// BoolX is like Bool, but panics if an error occurs. 
-func (psgb *ProvisioningStepGroupBy) BoolX(ctx context.Context) bool { - v, err := psgb.Bool(ctx) +func (psq *ProvisioningStepQuery) loadProvisioningStepToAgentTask(ctx context.Context, query *AgentTaskQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *AgentTask)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ProvisioningStep) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.AgentTask(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(provisioningstep.ProvisioningStepToAgentTaskColumn), fks...)) + })) + neighbors, err := query.All(ctx) if err != nil { - panic(err) + return err + } + for _, n := range neighbors { + fk := n.agent_task_agent_task_to_provisioning_step + if fk == nil { + return fmt.Errorf(`foreign-key "agent_task_agent_task_to_provisioning_step" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "agent_task_agent_task_to_provisioning_step" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) } - return v + return nil } - -func (psgb *ProvisioningStepGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range psgb.fields { - if !provisioningstep.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (psq *ProvisioningStepQuery) loadProvisioningStepToGinFileMiddleware(ctx context.Context, query *GinFileMiddlewareQuery, nodes []*ProvisioningStep, init func(*ProvisioningStep), assign func(*ProvisioningStep, *GinFileMiddleware)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ProvisioningStep) + for i := range nodes { + if nodes[i].gin_file_middleware_gin_file_middleware_to_provisioning_step == nil { + continue + } + fk := *nodes[i].gin_file_middleware_gin_file_middleware_to_provisioning_step + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - selector := psgb.sqlQuery() - if err := selector.Err(); err != nil { - return err + if len(ids) == 0 { + return nil } - rows := &sql.Rows{} - query, args := selector.Query() - if err := psgb.driver.Query(ctx, query, args, rows); err != nil { + query.Where(ginfilemiddleware.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { return err } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -func (psgb *ProvisioningStepGroupBy) sqlQuery() *sql.Selector { - selector := psgb.sql.Select() - aggregation := make([]string, 0, len(psgb.fns)) - for _, fn := range psgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(psgb.fields)+len(psgb.fns)) - for _, f := range psgb.fields { - columns = append(columns, selector.C(f)) + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "gin_file_middleware_gin_file_middleware_to_provisioning_step" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) } - columns = append(columns, aggregation...) - selector.Select(columns...) } - return selector.GroupBy(selector.Columns(psgb.fields...)...) 
-} - -// ProvisioningStepSelect is the builder for selecting fields of ProvisioningStep entities. -type ProvisioningStepSelect struct { - *ProvisioningStepQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + return nil } -// Scan applies the selector query and scans the result into the given value. -func (pss *ProvisioningStepSelect) Scan(ctx context.Context, v interface{}) error { - if err := pss.prepareQuery(ctx); err != nil { - return err +func (psq *ProvisioningStepQuery) sqlCount(ctx context.Context) (int, error) { + _spec := psq.querySpec() + if len(psq.modifiers) > 0 { + _spec.Modifiers = psq.modifiers } - pss.sql = pss.ProvisioningStepQuery.sqlQuery(ctx) - return pss.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (pss *ProvisioningStepSelect) ScanX(ctx context.Context, v interface{}) { - if err := pss.Scan(ctx, v); err != nil { - panic(err) + _spec.Node.Columns = psq.ctx.Fields + if len(psq.ctx.Fields) > 0 { + _spec.Unique = psq.ctx.Unique != nil && *psq.ctx.Unique } + return sqlgraph.CountNodes(ctx, psq.driver, _spec) } -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Strings(ctx context.Context) ([]string, error) { - if len(pss.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepSelect.Strings is not achievable when selecting more than 1 field") +func (psq *ProvisioningStepQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(provisioningstep.Table, provisioningstep.Columns, sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID)) + _spec.From = psq.sql + if unique := psq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if psq.path != nil { + _spec.Unique = true } - var v []string - if err := pss.Scan(ctx, &v); err != nil { - return nil, err + if fields := psq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, provisioningstep.FieldID) + for i := range fields { + if fields[i] != provisioningstep.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (pss *ProvisioningStepSelect) StringsX(ctx context.Context) []string { - v, err := pss.Strings(ctx) - if err != nil { - panic(err) + if ps := psq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = pss.Strings(ctx); err != nil { - return + if limit := psq.ctx.Limit; limit != nil { + _spec.Limit = *limit } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepSelect.Strings returned %d results when one was expected", len(v)) + if offset := psq.ctx.Offset; offset != nil { + _spec.Offset = *offset } - return -} - -// StringX is like String, but panics if an error occurs. 
-func (pss *ProvisioningStepSelect) StringX(ctx context.Context) string { - v, err := pss.String(ctx) - if err != nil { - panic(err) + if ps := psq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } } - return v + return _spec } -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Ints(ctx context.Context) ([]int, error) { - if len(pss.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepSelect.Ints is not achievable when selecting more than 1 field") +func (psq *ProvisioningStepQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(psq.driver.Dialect()) + t1 := builder.Table(provisioningstep.Table) + columns := psq.ctx.Fields + if len(columns) == 0 { + columns = provisioningstep.Columns } - var v []int - if err := pss.Scan(ctx, &v); err != nil { - return nil, err + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if psq.sql != nil { + selector = psq.sql + selector.Select(selector.Columns(columns...)...) } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (pss *ProvisioningStepSelect) IntsX(ctx context.Context) []int { - v, err := pss.Ints(ctx) - if err != nil { - panic(err) + if psq.ctx.Unique != nil && *psq.ctx.Unique { + selector.Distinct() } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = pss.Ints(ctx); err != nil { - return + for _, p := range psq.predicates { + p(selector) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepSelect.Ints returned %d results when one was expected", len(v)) + for _, p := range psq.order { + p(selector) } - return -} - -// IntX is like Int, but panics if an error occurs. -func (pss *ProvisioningStepSelect) IntX(ctx context.Context) int { - v, err := pss.Int(ctx) - if err != nil { - panic(err) + if offset := psq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) } - return v + if limit := psq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector } -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(pss.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepSelect.Float64s is not achievable when selecting more than 1 field") +// WithNamedProvisioningStepToAgentTask tells the query-builder to eager-load the nodes that are connected to the "ProvisioningStepToAgentTask" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. 
+func (psq *ProvisioningStepQuery) WithNamedProvisioningStepToAgentTask(name string, opts ...func(*AgentTaskQuery)) *ProvisioningStepQuery { + query := (&AgentTaskClient{config: psq.config}).Query() + for _, opt := range opts { + opt(query) } - var v []float64 - if err := pss.Scan(ctx, &v); err != nil { - return nil, err + if psq.withNamedProvisioningStepToAgentTask == nil { + psq.withNamedProvisioningStepToAgentTask = make(map[string]*AgentTaskQuery) } - return v, nil + psq.withNamedProvisioningStepToAgentTask[name] = query + return psq } -// Float64sX is like Float64s, but panics if an error occurs. -func (pss *ProvisioningStepSelect) Float64sX(ctx context.Context) []float64 { - v, err := pss.Float64s(ctx) - if err != nil { - panic(err) - } - return v +// ProvisioningStepGroupBy is the group-by builder for ProvisioningStep entities. +type ProvisioningStepGroupBy struct { + selector + build *ProvisioningStepQuery } -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = pss.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepSelect.Float64s returned %d results when one was expected", len(v)) - } - return +// Aggregate adds the given aggregation functions to the group-by query. +func (psgb *ProvisioningStepGroupBy) Aggregate(fns ...AggregateFunc) *ProvisioningStepGroupBy { + psgb.fns = append(psgb.fns, fns...) + return psgb } -// Float64X is like Float64, but panics if an error occurs. -func (pss *ProvisioningStepSelect) Float64X(ctx context.Context) float64 { - v, err := pss.Float64(ctx) - if err != nil { - panic(err) +// Scan applies the selector query and scans the result into the given value. +func (psgb *ProvisioningStepGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, psgb.build.ctx, "GroupBy") + if err := psgb.build.prepareQuery(ctx); err != nil { + return err } - return v + return scanWithInterceptors[*ProvisioningStepQuery, *ProvisioningStepGroupBy](ctx, psgb.build, psgb, psgb.build.inters, v) } -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Bools(ctx context.Context) ([]bool, error) { - if len(pss.fields) > 1 { - return nil, errors.New("ent: ProvisioningStepSelect.Bools is not achievable when selecting more than 1 field") +func (psgb *ProvisioningStepGroupBy) sqlScan(ctx context.Context, root *ProvisioningStepQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(psgb.fns)) + for _, fn := range psgb.fns { + aggregation = append(aggregation, fn(selector)) } - var v []bool - if err := pss.Scan(ctx, &v); err != nil { - return nil, err + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*psgb.flds)+len(psgb.fns)) + for _, f := range *psgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*psgb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := psgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err } - return v, nil + defer rows.Close() + return sql.ScanSlice(rows, v) } -// BoolsX is like Bools, but panics if an error occurs. -func (pss *ProvisioningStepSelect) BoolsX(ctx context.Context) []bool { - v, err := pss.Bools(ctx) - if err != nil { - panic(err) - } - return v +// ProvisioningStepSelect is the builder for selecting fields of ProvisioningStep entities. +type ProvisioningStepSelect struct { + *ProvisioningStepQuery + selector } -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (pss *ProvisioningStepSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = pss.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{provisioningstep.Label} - default: - err = fmt.Errorf("ent: ProvisioningStepSelect.Bools returned %d results when one was expected", len(v)) - } - return +// Aggregate adds the given aggregation functions to the selector query. +func (pss *ProvisioningStepSelect) Aggregate(fns ...AggregateFunc) *ProvisioningStepSelect { + pss.fns = append(pss.fns, fns...) + return pss } -// BoolX is like Bool, but panics if an error occurs. -func (pss *ProvisioningStepSelect) BoolX(ctx context.Context) bool { - v, err := pss.Bool(ctx) - if err != nil { - panic(err) +// Scan applies the selector query and scans the result into the given value. +func (pss *ProvisioningStepSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pss.ctx, "Select") + if err := pss.prepareQuery(ctx); err != nil { + return err } - return v + return scanWithInterceptors[*ProvisioningStepQuery, *ProvisioningStepSelect](ctx, pss.ProvisioningStepQuery, pss, pss.inters, v) } -func (pss *ProvisioningStepSelect) sqlScan(ctx context.Context, v interface{}) error { +func (pss *ProvisioningStepSelect) sqlScan(ctx context.Context, root *ProvisioningStepQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pss.fns)) + for _, fn := range pss.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*pss.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := pss.sql.Query() + query, args := selector.Query() if err := pss.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/provisioningstep_update.go b/ent/provisioningstep_update.go index bc5cc5d6..987ea176 100755 --- a/ent/provisioningstep_update.go +++ b/ent/provisioningstep_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -46,6 +46,14 @@ func (psu *ProvisioningStepUpdate) SetType(pr provisioningstep.Type) *Provisioni return psu } +// SetNillableType sets the "type" field if the given value is not nil. +func (psu *ProvisioningStepUpdate) SetNillableType(pr *provisioningstep.Type) *ProvisioningStepUpdate { + if pr != nil { + psu.SetType(*pr) + } + return psu +} + // SetStepNumber sets the "step_number" field. 
func (psu *ProvisioningStepUpdate) SetStepNumber(i int) *ProvisioningStepUpdate { psu.mutation.ResetStepNumber() @@ -53,6 +61,14 @@ func (psu *ProvisioningStepUpdate) SetStepNumber(i int) *ProvisioningStepUpdate return psu } +// SetNillableStepNumber sets the "step_number" field if the given value is not nil. +func (psu *ProvisioningStepUpdate) SetNillableStepNumber(i *int) *ProvisioningStepUpdate { + if i != nil { + psu.SetStepNumber(*i) + } + return psu +} + // AddStepNumber adds i to the "step_number" field. func (psu *ProvisioningStepUpdate) AddStepNumber(i int) *ProvisioningStepUpdate { psu.mutation.AddStepNumber(i) @@ -377,40 +393,7 @@ func (psu *ProvisioningStepUpdate) ClearProvisioningStepToGinFileMiddleware() *P // Save executes the query and returns the number of nodes affected by the update operation. func (psu *ProvisioningStepUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(psu.hooks) == 0 { - if err = psu.check(); err != nil { - return 0, err - } - affected, err = psu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisioningStepMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = psu.check(); err != nil { - return 0, err - } - psu.mutation = mutation - affected, err = psu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(psu.hooks) - 1; i >= 0; i-- { - if psu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = psu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, psu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, psu.sqlSave, psu.mutation, psu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -446,16 +429,10 @@ func (psu *ProvisioningStepUpdate) check() error { } func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisioningstep.Table, - Columns: provisioningstep.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, - }, + if err := psu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(provisioningstep.Table, provisioningstep.Columns, sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID)) if ps := psu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -464,25 +441,13 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro } } if value, ok := psu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: provisioningstep.FieldType, - }) + _spec.SetField(provisioningstep.FieldType, field.TypeEnum, value) } if value, ok := psu.mutation.StepNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: provisioningstep.FieldStepNumber, - }) + _spec.SetField(provisioningstep.FieldStepNumber, field.TypeInt, value) } if value, ok := psu.mutation.AddedStepNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: provisioningstep.FieldStepNumber, - }) + _spec.AddField(provisioningstep.FieldStepNumber, field.TypeInt, value) } if psu.mutation.ProvisioningStepToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -492,10 +457,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -508,10 +470,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -527,10 +486,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -543,10 +499,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -562,10 +515,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: 
[]string{provisioningstep.ProvisioningStepToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -578,10 +528,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -597,10 +544,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -613,10 +557,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -632,10 +573,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -648,10 +586,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -667,10 +602,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -683,10 +615,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -702,10 +631,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -718,10 +644,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -737,10 +660,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -753,10 +673,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -772,10 +689,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -788,10 +702,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -807,10 +718,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -823,10 +731,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -842,10 +747,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -858,10 +760,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: 
[]string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -877,10 +776,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -896,10 +792,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -912,10 +805,7 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{provisioningstep.ProvisioningStepToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -927,10 +817,11 @@ func (psu *ProvisioningStepUpdate) sqlSave(ctx context.Context) (n int, err erro if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{provisioningstep.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + psu.mutation.done = true return n, nil } @@ -948,6 +839,14 @@ func (psuo *ProvisioningStepUpdateOne) SetType(pr provisioningstep.Type) *Provis return psuo } +// SetNillableType sets the "type" field if the given value is not nil. +func (psuo *ProvisioningStepUpdateOne) SetNillableType(pr *provisioningstep.Type) *ProvisioningStepUpdateOne { + if pr != nil { + psuo.SetType(*pr) + } + return psuo +} + // SetStepNumber sets the "step_number" field. func (psuo *ProvisioningStepUpdateOne) SetStepNumber(i int) *ProvisioningStepUpdateOne { psuo.mutation.ResetStepNumber() @@ -955,6 +854,14 @@ func (psuo *ProvisioningStepUpdateOne) SetStepNumber(i int) *ProvisioningStepUpd return psuo } +// SetNillableStepNumber sets the "step_number" field if the given value is not nil. +func (psuo *ProvisioningStepUpdateOne) SetNillableStepNumber(i *int) *ProvisioningStepUpdateOne { + if i != nil { + psuo.SetStepNumber(*i) + } + return psuo +} + // AddStepNumber adds i to the "step_number" field. func (psuo *ProvisioningStepUpdateOne) AddStepNumber(i int) *ProvisioningStepUpdateOne { psuo.mutation.AddStepNumber(i) @@ -1277,6 +1184,12 @@ func (psuo *ProvisioningStepUpdateOne) ClearProvisioningStepToGinFileMiddleware( return psuo } +// Where appends a list predicates to the ProvisioningStepUpdate builder. +func (psuo *ProvisioningStepUpdateOne) Where(ps ...predicate.ProvisioningStep) *ProvisioningStepUpdateOne { + psuo.mutation.Where(ps...) + return psuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (psuo *ProvisioningStepUpdateOne) Select(field string, fields ...string) *ProvisioningStepUpdateOne { @@ -1286,40 +1199,7 @@ func (psuo *ProvisioningStepUpdateOne) Select(field string, fields ...string) *P // Save executes the query and returns the updated ProvisioningStep entity. func (psuo *ProvisioningStepUpdateOne) Save(ctx context.Context) (*ProvisioningStep, error) { - var ( - err error - node *ProvisioningStep - ) - if len(psuo.hooks) == 0 { - if err = psuo.check(); err != nil { - return nil, err - } - node, err = psuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ProvisioningStepMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = psuo.check(); err != nil { - return nil, err - } - psuo.mutation = mutation - node, err = psuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(psuo.hooks) - 1; i >= 0; i-- { - if psuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = psuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, psuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, psuo.sqlSave, psuo.mutation, psuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -1355,16 +1235,10 @@ func (psuo *ProvisioningStepUpdateOne) check() error { } func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *ProvisioningStep, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: provisioningstep.Table, - Columns: provisioningstep.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, - }, + if err := psuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(provisioningstep.Table, provisioningstep.Columns, sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID)) id, ok := psuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ProvisioningStep.id" for update`)} @@ -1390,25 +1264,13 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov } } if value, ok := psuo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: provisioningstep.FieldType, - }) + _spec.SetField(provisioningstep.FieldType, field.TypeEnum, value) } if value, ok := psuo.mutation.StepNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: provisioningstep.FieldStepNumber, - }) + _spec.SetField(provisioningstep.FieldStepNumber, field.TypeInt, value) } if value, ok := psuo.mutation.AddedStepNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: provisioningstep.FieldStepNumber, - }) + _spec.AddField(provisioningstep.FieldStepNumber, field.TypeInt, value) } if psuo.mutation.ProvisioningStepToStatusCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1418,10 +1280,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) @@ -1434,10 +1293,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1453,10 +1309,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1469,10 +1322,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1488,10 +1338,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1504,10 +1351,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToScriptColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1523,10 +1367,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1539,10 +1380,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToCommandColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: command.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(command.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1558,10 +1396,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1574,10 +1409,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: 
[]string{provisioningstep.ProvisioningStepToDNSRecordColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: dnsrecord.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(dnsrecord.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1593,10 +1425,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1609,10 +1438,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToFileDeleteColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedelete.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedelete.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1628,10 +1454,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1644,10 +1467,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToFileDownloadColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: filedownload.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(filedownload.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1663,10 +1483,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1679,10 +1496,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToFileExtractColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: fileextract.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(fileextract.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1698,10 +1512,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1714,10 +1525,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToAnsibleColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: 
ansible.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ansible.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1733,10 +1541,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1749,10 +1554,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1768,10 +1570,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1784,10 +1583,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1803,10 +1599,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToAgentTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: agenttask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(agenttask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1822,10 +1615,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1838,10 +1628,7 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov Columns: []string{provisioningstep.ProvisioningStepToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1856,9 +1643,10 @@ func (psuo *ProvisioningStepUpdateOne) sqlSave(ctx context.Context) (_node *Prov if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{provisioningstep.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + psuo.mutation.done = true return _node, nil } diff --git a/ent/repocommit.go b/ent/repocommit.go index ed6fdc1c..eedcc83c 100755 --- 
a/ent/repocommit.go +++ b/ent/repocommit.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/repocommit" "github.com/gen0cide/laforge/ent/repository" @@ -39,11 +40,13 @@ type RepoCommit struct { // The values are being populated by the RepoCommitQuery when eager-loading is set. Edges RepoCommitEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // RepoCommitToRepository holds the value of the RepoCommitToRepository edge. HCLRepoCommitToRepository *Repository `json:"RepoCommitToRepository,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ repository_repository_to_repo_commit *uuid.UUID + selectValues sql.SelectValues } // RepoCommitEdges holds the relations/edges for other nodes in the graph. @@ -53,6 +56,8 @@ type RepoCommitEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. + totalCount [1]map[string]int } // RepoCommitToRepositoryOrErr returns the RepoCommitToRepository value or an error if the edge @@ -60,8 +65,7 @@ type RepoCommitEdges struct { func (e RepoCommitEdges) RepoCommitToRepositoryOrErr() (*Repository, error) { if e.loadedTypes[0] { if e.RepoCommitToRepository == nil { - // The edge RepoCommitToRepository was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: repository.Label} } return e.RepoCommitToRepository, nil @@ -70,8 +74,8 @@ func (e RepoCommitEdges) RepoCommitToRepositoryOrErr() (*Repository, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*RepoCommit) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*RepoCommit) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case repocommit.FieldAuthor, repocommit.FieldCommitter, repocommit.FieldParentHashes: @@ -85,7 +89,7 @@ func (*RepoCommit) scanValues(columns []string) ([]interface{}, error) { case repocommit.ForeignKeys[0]: // repository_repository_to_repo_commit values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type RepoCommit", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -93,7 +97,7 @@ func (*RepoCommit) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the RepoCommit fields. -func (rc *RepoCommit) assignValues(columns []string, values []interface{}) error { +func (rc *RepoCommit) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -166,31 +170,39 @@ func (rc *RepoCommit) assignValues(columns []string, values []interface{}) error rc.repository_repository_to_repo_commit = new(uuid.UUID) *rc.repository_repository_to_repo_commit = *value.S.(*uuid.UUID) } + default: + rc.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the RepoCommit. +// This includes values selected through modifiers, order, etc. 
+func (rc *RepoCommit) Value(name string) (ent.Value, error) { + return rc.selectValues.Get(name) +} + // QueryRepoCommitToRepository queries the "RepoCommitToRepository" edge of the RepoCommit entity. func (rc *RepoCommit) QueryRepoCommitToRepository() *RepositoryQuery { - return (&RepoCommitClient{config: rc.config}).QueryRepoCommitToRepository(rc) + return NewRepoCommitClient(rc.config).QueryRepoCommitToRepository(rc) } // Update returns a builder for updating this RepoCommit. // Note that you need to call RepoCommit.Unwrap() before calling this method if this RepoCommit // was returned from a transaction, and the transaction was committed or rolled back. func (rc *RepoCommit) Update() *RepoCommitUpdateOne { - return (&RepoCommitClient{config: rc.config}).UpdateOne(rc) + return NewRepoCommitClient(rc.config).UpdateOne(rc) } // Unwrap unwraps the RepoCommit entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (rc *RepoCommit) Unwrap() *RepoCommit { - tx, ok := rc.config.driver.(*txDriver) + _tx, ok := rc.config.driver.(*txDriver) if !ok { panic("ent: RepoCommit is not a transactional entity") } - rc.config.driver = tx.drv + rc.config.driver = _tx.drv return rc } @@ -198,22 +210,29 @@ func (rc *RepoCommit) Unwrap() *RepoCommit { func (rc *RepoCommit) String() string { var builder strings.Builder builder.WriteString("RepoCommit(") - builder.WriteString(fmt.Sprintf("id=%v", rc.ID)) - builder.WriteString(", revision=") + builder.WriteString(fmt.Sprintf("id=%v, ", rc.ID)) + builder.WriteString("revision=") builder.WriteString(fmt.Sprintf("%v", rc.Revision)) - builder.WriteString(", hash=") + builder.WriteString(", ") + builder.WriteString("hash=") builder.WriteString(rc.Hash) - builder.WriteString(", author=") + builder.WriteString(", ") + builder.WriteString("author=") builder.WriteString(fmt.Sprintf("%v", rc.Author)) - builder.WriteString(", committer=") + builder.WriteString(", ") + builder.WriteString("committer=") builder.WriteString(fmt.Sprintf("%v", rc.Committer)) - builder.WriteString(", pgp_signature=") + builder.WriteString(", ") + builder.WriteString("pgp_signature=") builder.WriteString(rc.PgpSignature) - builder.WriteString(", message=") + builder.WriteString(", ") + builder.WriteString("message=") builder.WriteString(rc.Message) - builder.WriteString(", tree_hash=") + builder.WriteString(", ") + builder.WriteString("tree_hash=") builder.WriteString(rc.TreeHash) - builder.WriteString(", parent_hashes=") + builder.WriteString(", ") + builder.WriteString("parent_hashes=") builder.WriteString(fmt.Sprintf("%v", rc.ParentHashes)) builder.WriteByte(')') return builder.String() @@ -221,9 +240,3 @@ func (rc *RepoCommit) String() string { // RepoCommits is a parsable slice of RepoCommit. type RepoCommits []*RepoCommit - -func (rc RepoCommits) config(cfg config) { - for _i := range rc { - rc[_i].config = cfg - } -} diff --git a/ent/repocommit/repocommit.go b/ent/repocommit/repocommit.go index a5848e78..b5bee90a 100755 --- a/ent/repocommit/repocommit.go +++ b/ent/repocommit/repocommit.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package repocommit import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -78,3 +80,50 @@ var ( // DefaultID holds the default value on creation for the "id" field. 
DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the RepoCommit queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByRevision orders the results by the revision field. +func ByRevision(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevision, opts...).ToFunc() +} + +// ByHash orders the results by the hash field. +func ByHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHash, opts...).ToFunc() +} + +// ByPgpSignature orders the results by the pgp_signature field. +func ByPgpSignature(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPgpSignature, opts...).ToFunc() +} + +// ByMessage orders the results by the message field. +func ByMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMessage, opts...).ToFunc() +} + +// ByTreeHash orders the results by the tree_hash field. +func ByTreeHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTreeHash, opts...).ToFunc() +} + +// ByRepoCommitToRepositoryField orders the results by RepoCommitToRepository field. +func ByRepoCommitToRepositoryField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRepoCommitToRepositoryStep(), sql.OrderByField(field, opts...)) + } +} +func newRepoCommitToRepositoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RepoCommitToRepositoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, RepoCommitToRepositoryTable, RepoCommitToRepositoryColumn), + ) +} diff --git a/ent/repocommit/where.go b/ent/repocommit/where.go index 8ccce9eb..97b1de50 100755 --- a/ent/repocommit/where.go +++ b/ent/repocommit/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package repocommit @@ -11,640 +11,372 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.RepoCommit(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.RepoCommit(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.RepoCommit(sql.FieldLTE(FieldID, id)) } // Revision applies equality check predicate on the "revision" field. It's identical to RevisionEQ. func Revision(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldRevision, v)) } // Hash applies equality check predicate on the "hash" field. It's identical to HashEQ. func Hash(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldHash, v)) } // PgpSignature applies equality check predicate on the "pgp_signature" field. It's identical to PgpSignatureEQ. func PgpSignature(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldPgpSignature, v)) } // Message applies equality check predicate on the "message" field. It's identical to MessageEQ. func Message(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldMessage, v)) } // TreeHash applies equality check predicate on the "tree_hash" field. It's identical to TreeHashEQ. func TreeHash(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldTreeHash, v)) } // RevisionEQ applies the EQ predicate on the "revision" field. func RevisionEQ(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldRevision, v)) } // RevisionNEQ applies the NEQ predicate on the "revision" field. 
func RevisionNEQ(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldNEQ(FieldRevision, v)) } // RevisionIn applies the In predicate on the "revision" field. func RevisionIn(vs ...int) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRevision), v...)) - }) + return predicate.RepoCommit(sql.FieldIn(FieldRevision, vs...)) } // RevisionNotIn applies the NotIn predicate on the "revision" field. func RevisionNotIn(vs ...int) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRevision), v...)) - }) + return predicate.RepoCommit(sql.FieldNotIn(FieldRevision, vs...)) } // RevisionGT applies the GT predicate on the "revision" field. func RevisionGT(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldGT(FieldRevision, v)) } // RevisionGTE applies the GTE predicate on the "revision" field. func RevisionGTE(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldGTE(FieldRevision, v)) } // RevisionLT applies the LT predicate on the "revision" field. func RevisionLT(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldLT(FieldRevision, v)) } // RevisionLTE applies the LTE predicate on the "revision" field. func RevisionLTE(v int) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRevision), v)) - }) + return predicate.RepoCommit(sql.FieldLTE(FieldRevision, v)) } // HashEQ applies the EQ predicate on the "hash" field. func HashEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldHash, v)) } // HashNEQ applies the NEQ predicate on the "hash" field. func HashNEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldNEQ(FieldHash, v)) } // HashIn applies the In predicate on the "hash" field. func HashIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHash), v...)) - }) + return predicate.RepoCommit(sql.FieldIn(FieldHash, vs...)) } // HashNotIn applies the NotIn predicate on the "hash" field. 
func HashNotIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHash), v...)) - }) + return predicate.RepoCommit(sql.FieldNotIn(FieldHash, vs...)) } // HashGT applies the GT predicate on the "hash" field. func HashGT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldGT(FieldHash, v)) } // HashGTE applies the GTE predicate on the "hash" field. func HashGTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldGTE(FieldHash, v)) } // HashLT applies the LT predicate on the "hash" field. func HashLT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldLT(FieldHash, v)) } // HashLTE applies the LTE predicate on the "hash" field. func HashLTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldLTE(FieldHash, v)) } // HashContains applies the Contains predicate on the "hash" field. func HashContains(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldContains(FieldHash, v)) } // HashHasPrefix applies the HasPrefix predicate on the "hash" field. func HashHasPrefix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldHasPrefix(FieldHash, v)) } // HashHasSuffix applies the HasSuffix predicate on the "hash" field. func HashHasSuffix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldHasSuffix(FieldHash, v)) } // HashEqualFold applies the EqualFold predicate on the "hash" field. func HashEqualFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldEqualFold(FieldHash, v)) } // HashContainsFold applies the ContainsFold predicate on the "hash" field. func HashContainsFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHash), v)) - }) + return predicate.RepoCommit(sql.FieldContainsFold(FieldHash, v)) } // PgpSignatureEQ applies the EQ predicate on the "pgp_signature" field. func PgpSignatureEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldPgpSignature, v)) } // PgpSignatureNEQ applies the NEQ predicate on the "pgp_signature" field. 
func PgpSignatureNEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldNEQ(FieldPgpSignature, v)) } // PgpSignatureIn applies the In predicate on the "pgp_signature" field. func PgpSignatureIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldPgpSignature), v...)) - }) + return predicate.RepoCommit(sql.FieldIn(FieldPgpSignature, vs...)) } // PgpSignatureNotIn applies the NotIn predicate on the "pgp_signature" field. func PgpSignatureNotIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldPgpSignature), v...)) - }) + return predicate.RepoCommit(sql.FieldNotIn(FieldPgpSignature, vs...)) } // PgpSignatureGT applies the GT predicate on the "pgp_signature" field. func PgpSignatureGT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldGT(FieldPgpSignature, v)) } // PgpSignatureGTE applies the GTE predicate on the "pgp_signature" field. func PgpSignatureGTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldGTE(FieldPgpSignature, v)) } // PgpSignatureLT applies the LT predicate on the "pgp_signature" field. func PgpSignatureLT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldLT(FieldPgpSignature, v)) } // PgpSignatureLTE applies the LTE predicate on the "pgp_signature" field. func PgpSignatureLTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldLTE(FieldPgpSignature, v)) } // PgpSignatureContains applies the Contains predicate on the "pgp_signature" field. func PgpSignatureContains(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldContains(FieldPgpSignature, v)) } // PgpSignatureHasPrefix applies the HasPrefix predicate on the "pgp_signature" field. func PgpSignatureHasPrefix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldHasPrefix(FieldPgpSignature, v)) } // PgpSignatureHasSuffix applies the HasSuffix predicate on the "pgp_signature" field. 
func PgpSignatureHasSuffix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldHasSuffix(FieldPgpSignature, v)) } // PgpSignatureEqualFold applies the EqualFold predicate on the "pgp_signature" field. func PgpSignatureEqualFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldEqualFold(FieldPgpSignature, v)) } // PgpSignatureContainsFold applies the ContainsFold predicate on the "pgp_signature" field. func PgpSignatureContainsFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPgpSignature), v)) - }) + return predicate.RepoCommit(sql.FieldContainsFold(FieldPgpSignature, v)) } // MessageEQ applies the EQ predicate on the "message" field. func MessageEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldMessage, v)) } // MessageNEQ applies the NEQ predicate on the "message" field. func MessageNEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldNEQ(FieldMessage, v)) } // MessageIn applies the In predicate on the "message" field. func MessageIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldMessage), v...)) - }) + return predicate.RepoCommit(sql.FieldIn(FieldMessage, vs...)) } // MessageNotIn applies the NotIn predicate on the "message" field. func MessageNotIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldMessage), v...)) - }) + return predicate.RepoCommit(sql.FieldNotIn(FieldMessage, vs...)) } // MessageGT applies the GT predicate on the "message" field. func MessageGT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldGT(FieldMessage, v)) } // MessageGTE applies the GTE predicate on the "message" field. func MessageGTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldGTE(FieldMessage, v)) } // MessageLT applies the LT predicate on the "message" field. func MessageLT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldLT(FieldMessage, v)) } // MessageLTE applies the LTE predicate on the "message" field. 
func MessageLTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldLTE(FieldMessage, v)) } // MessageContains applies the Contains predicate on the "message" field. func MessageContains(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldContains(FieldMessage, v)) } // MessageHasPrefix applies the HasPrefix predicate on the "message" field. func MessageHasPrefix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldHasPrefix(FieldMessage, v)) } // MessageHasSuffix applies the HasSuffix predicate on the "message" field. func MessageHasSuffix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldHasSuffix(FieldMessage, v)) } // MessageEqualFold applies the EqualFold predicate on the "message" field. func MessageEqualFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldEqualFold(FieldMessage, v)) } // MessageContainsFold applies the ContainsFold predicate on the "message" field. func MessageContainsFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldMessage), v)) - }) + return predicate.RepoCommit(sql.FieldContainsFold(FieldMessage, v)) } // TreeHashEQ applies the EQ predicate on the "tree_hash" field. func TreeHashEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldEQ(FieldTreeHash, v)) } // TreeHashNEQ applies the NEQ predicate on the "tree_hash" field. func TreeHashNEQ(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldNEQ(FieldTreeHash, v)) } // TreeHashIn applies the In predicate on the "tree_hash" field. func TreeHashIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTreeHash), v...)) - }) + return predicate.RepoCommit(sql.FieldIn(FieldTreeHash, vs...)) } // TreeHashNotIn applies the NotIn predicate on the "tree_hash" field. func TreeHashNotIn(vs ...string) predicate.RepoCommit { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.RepoCommit(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTreeHash), v...)) - }) + return predicate.RepoCommit(sql.FieldNotIn(FieldTreeHash, vs...)) } // TreeHashGT applies the GT predicate on the "tree_hash" field. 
func TreeHashGT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldGT(FieldTreeHash, v)) } // TreeHashGTE applies the GTE predicate on the "tree_hash" field. func TreeHashGTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldGTE(FieldTreeHash, v)) } // TreeHashLT applies the LT predicate on the "tree_hash" field. func TreeHashLT(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldLT(FieldTreeHash, v)) } // TreeHashLTE applies the LTE predicate on the "tree_hash" field. func TreeHashLTE(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldLTE(FieldTreeHash, v)) } // TreeHashContains applies the Contains predicate on the "tree_hash" field. func TreeHashContains(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldContains(FieldTreeHash, v)) } // TreeHashHasPrefix applies the HasPrefix predicate on the "tree_hash" field. func TreeHashHasPrefix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldHasPrefix(FieldTreeHash, v)) } // TreeHashHasSuffix applies the HasSuffix predicate on the "tree_hash" field. func TreeHashHasSuffix(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldHasSuffix(FieldTreeHash, v)) } // TreeHashEqualFold applies the EqualFold predicate on the "tree_hash" field. func TreeHashEqualFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldEqualFold(FieldTreeHash, v)) } // TreeHashContainsFold applies the ContainsFold predicate on the "tree_hash" field. func TreeHashContainsFold(v string) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldTreeHash), v)) - }) + return predicate.RepoCommit(sql.FieldContainsFold(FieldTreeHash, v)) } // HasRepoCommitToRepository applies the HasEdge predicate on the "RepoCommitToRepository" edge. @@ -652,7 +384,6 @@ func HasRepoCommitToRepository() predicate.RepoCommit { return predicate.RepoCommit(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(RepoCommitToRepositoryTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, RepoCommitToRepositoryTable, RepoCommitToRepositoryColumn), ) sqlgraph.HasNeighbors(s, step) @@ -662,11 +393,7 @@ func HasRepoCommitToRepository() predicate.RepoCommit { // HasRepoCommitToRepositoryWith applies the HasEdge predicate on the "RepoCommitToRepository" edge with a given conditions (other predicates). 
func HasRepoCommitToRepositoryWith(preds ...predicate.Repository) predicate.RepoCommit { return predicate.RepoCommit(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(RepoCommitToRepositoryInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, RepoCommitToRepositoryTable, RepoCommitToRepositoryColumn), - ) + step := newRepoCommitToRepositoryStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -677,32 +404,15 @@ func HasRepoCommitToRepositoryWith(preds ...predicate.Repository) predicate.Repo // And groups predicates with the AND operator between them. func And(predicates ...predicate.RepoCommit) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.RepoCommit(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.RepoCommit) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.RepoCommit(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.RepoCommit) predicate.RepoCommit { - return predicate.RepoCommit(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.RepoCommit(sql.NotPredicates(p)) } diff --git a/ent/repocommit_create.go b/ent/repocommit_create.go index fbe2ded7..28eb42c1 100755 --- a/ent/repocommit_create.go +++ b/ent/repocommit_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -110,44 +110,8 @@ func (rcc *RepoCommitCreate) Mutation() *RepoCommitMutation { // Save creates the RepoCommit in the database. func (rcc *RepoCommitCreate) Save(ctx context.Context) (*RepoCommit, error) { - var ( - err error - node *RepoCommit - ) rcc.defaults() - if len(rcc.hooks) == 0 { - if err = rcc.check(); err != nil { - return nil, err - } - node, err = rcc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepoCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = rcc.check(); err != nil { - return nil, err - } - rcc.mutation = mutation - if node, err = rcc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(rcc.hooks) - 1; i >= 0; i-- { - if rcc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = rcc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, rcc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, rcc.sqlSave, rcc.mutation, rcc.hooks) } // SaveX calls Save and panics if Save returns an error. 
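The where.go rewrite above is mechanical: every generated predicate now delegates to the entgo.io/ent/dialect/sql field helpers (sql.FieldEQ, sql.FieldIn, sql.AndPredicates, and so on) instead of hand-building a selector closure, and the new OrderOption helpers replace the old OrderFunc values, so existing call sites keep compiling. A minimal usage sketch of the regenerated surface; the function, client, ctx, and example values are assumptions for illustration, not part of this patch:

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/repocommit"
)

// SignedCommits lists commits at or above rev that carry a PGP signature,
// newest revision first. It uses only helpers generated by this patch:
// And, RevisionGTE, PgpSignatureNEQ, and the new ByRevision OrderOption.
func SignedCommits(ctx context.Context, client *ent.Client, rev int) ([]*ent.RepoCommit, error) {
	return client.RepoCommit.Query().
		Where(
			repocommit.And(
				repocommit.RevisionGTE(rev),
				repocommit.PgpSignatureNEQ(""),
			),
		).
		Order(repocommit.ByRevision(sql.OrderDesc())).
		All(ctx)
}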
@@ -210,10 +174,13 @@ func (rcc *RepoCommitCreate) check() error { } func (rcc *RepoCommitCreate) sqlSave(ctx context.Context) (*RepoCommit, error) { + if err := rcc.check(); err != nil { + return nil, err + } _node, _spec := rcc.createSpec() if err := sqlgraph.CreateNode(ctx, rcc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -224,86 +191,50 @@ func (rcc *RepoCommitCreate) sqlSave(ctx context.Context) (*RepoCommit, error) { return nil, err } } + rcc.mutation.id = &_node.ID + rcc.mutation.done = true return _node, nil } func (rcc *RepoCommitCreate) createSpec() (*RepoCommit, *sqlgraph.CreateSpec) { var ( _node = &RepoCommit{config: rcc.config} - _spec = &sqlgraph.CreateSpec{ - Table: repocommit.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(repocommit.Table, sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID)) ) if id, ok := rcc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := rcc.mutation.Revision(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: repocommit.FieldRevision, - }) + _spec.SetField(repocommit.FieldRevision, field.TypeInt, value) _node.Revision = value } if value, ok := rcc.mutation.Hash(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldHash, - }) + _spec.SetField(repocommit.FieldHash, field.TypeString, value) _node.Hash = value } if value, ok := rcc.mutation.Author(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldAuthor, - }) + _spec.SetField(repocommit.FieldAuthor, field.TypeJSON, value) _node.Author = value } if value, ok := rcc.mutation.Committer(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldCommitter, - }) + _spec.SetField(repocommit.FieldCommitter, field.TypeJSON, value) _node.Committer = value } if value, ok := rcc.mutation.PgpSignature(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldPgpSignature, - }) + _spec.SetField(repocommit.FieldPgpSignature, field.TypeString, value) _node.PgpSignature = value } if value, ok := rcc.mutation.Message(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldMessage, - }) + _spec.SetField(repocommit.FieldMessage, field.TypeString, value) _node.Message = value } if value, ok := rcc.mutation.TreeHash(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldTreeHash, - }) + _spec.SetField(repocommit.FieldTreeHash, field.TypeString, value) _node.TreeHash = value } if value, ok := rcc.mutation.ParentHashes(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldParentHashes, - }) + _spec.SetField(repocommit.FieldParentHashes, field.TypeJSON, value) _node.ParentHashes = value } if nodes := rcc.mutation.RepoCommitToRepositoryIDs(); len(nodes) > 0 { @@ -314,10 +245,7 @@ func (rcc *RepoCommitCreate) createSpec() (*RepoCommit, *sqlgraph.CreateSpec) { Columns: 
[]string{repocommit.RepoCommitToRepositoryColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -332,11 +260,15 @@ func (rcc *RepoCommitCreate) createSpec() (*RepoCommit, *sqlgraph.CreateSpec) { // RepoCommitCreateBulk is the builder for creating many RepoCommit entities in bulk. type RepoCommitCreateBulk struct { config + err error builders []*RepoCommitCreate } // Save creates the RepoCommit entities in the database. func (rccb *RepoCommitCreateBulk) Save(ctx context.Context) ([]*RepoCommit, error) { + if rccb.err != nil { + return nil, rccb.err + } specs := make([]*sqlgraph.CreateSpec, len(rccb.builders)) nodes := make([]*RepoCommit, len(rccb.builders)) mutators := make([]Mutator, len(rccb.builders)) @@ -353,8 +285,8 @@ func (rccb *RepoCommitCreateBulk) Save(ctx context.Context) ([]*RepoCommit, erro return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, rccb.builders[i+1].mutation) } else { @@ -362,7 +294,7 @@ func (rccb *RepoCommitCreateBulk) Save(ctx context.Context) ([]*RepoCommit, erro // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, rccb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/repocommit_delete.go b/ent/repocommit_delete.go index 49838958..84f4fe2c 100755 --- a/ent/repocommit_delete.go +++ b/ent/repocommit_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (rcd *RepoCommitDelete) Where(ps ...predicate.RepoCommit) *RepoCommitDelete // Exec executes the deletion query and returns how many vertices were deleted. func (rcd *RepoCommitDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(rcd.hooks) == 0 { - affected, err = rcd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepoCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - rcd.mutation = mutation - affected, err = rcd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(rcd.hooks) - 1; i >= 0; i-- { - if rcd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = rcd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, rcd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, rcd.sqlExec, rcd.mutation, rcd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
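The create and delete builders follow the same pattern: the hand-rolled hook and mutator loops collapse into the generated withHooks helper, and the specs are assembled with sqlgraph.NewCreateSpec/NewDeleteSpec plus SetField, again without changing the exported builder methods. A small delete sketch against that unchanged surface; PruneCommits and minRevision are illustrative names, not taken from this patch:

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/repocommit"
)

// PruneCommits removes every RepoCommit below minRevision and reports how
// many rows were deleted, going through the regenerated RepoCommitDelete
// builder (Where + Exec, now wrapped by withHooks).
func PruneCommits(ctx context.Context, client *ent.Client, minRevision int) (int, error) {
	return client.RepoCommit.Delete().
		Where(repocommit.RevisionLT(minRevision)).
		Exec(ctx)
}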
@@ -68,15 +40,7 @@ func (rcd *RepoCommitDelete) ExecX(ctx context.Context) int { } func (rcd *RepoCommitDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: repocommit.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(repocommit.Table, sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID)) if ps := rcd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (rcd *RepoCommitDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, rcd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, rcd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + rcd.mutation.done = true + return affected, err } // RepoCommitDeleteOne is the builder for deleting a single RepoCommit entity. @@ -92,6 +61,12 @@ type RepoCommitDeleteOne struct { rcd *RepoCommitDelete } +// Where appends a list predicates to the RepoCommitDelete builder. +func (rcdo *RepoCommitDeleteOne) Where(ps ...predicate.RepoCommit) *RepoCommitDeleteOne { + rcdo.rcd.mutation.Where(ps...) + return rcdo +} + // Exec executes the deletion query. func (rcdo *RepoCommitDeleteOne) Exec(ctx context.Context) error { n, err := rcdo.rcd.Exec(ctx) @@ -107,5 +82,7 @@ func (rcdo *RepoCommitDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (rcdo *RepoCommitDeleteOne) ExecX(ctx context.Context) { - rcdo.rcd.ExecX(ctx) + if err := rcdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/repocommit_query.go b/ent/repocommit_query.go index 99922485..24c76f74 100755 --- a/ent/repocommit_query.go +++ b/ent/repocommit_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // RepoCommitQuery is the builder for querying RepoCommit entities. type RepoCommitQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.RepoCommit - // eager-loading edges. + ctx *QueryContext + order []repocommit.OrderOption + inters []Interceptor + predicates []predicate.RepoCommit withRepoCommitToRepository *RepositoryQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*RepoCommit) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (rcq *RepoCommitQuery) Where(ps ...predicate.RepoCommit) *RepoCommitQuery { return rcq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (rcq *RepoCommitQuery) Limit(limit int) *RepoCommitQuery { - rcq.limit = &limit + rcq.ctx.Limit = &limit return rcq } -// Offset adds an offset step to the query. +// Offset to start from. func (rcq *RepoCommitQuery) Offset(offset int) *RepoCommitQuery { - rcq.offset = &offset + rcq.ctx.Offset = &offset return rcq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
func (rcq *RepoCommitQuery) Unique(unique bool) *RepoCommitQuery { - rcq.unique = &unique + rcq.ctx.Unique = &unique return rcq } -// Order adds an order step to the query. -func (rcq *RepoCommitQuery) Order(o ...OrderFunc) *RepoCommitQuery { +// Order specifies how the records should be ordered. +func (rcq *RepoCommitQuery) Order(o ...repocommit.OrderOption) *RepoCommitQuery { rcq.order = append(rcq.order, o...) return rcq } // QueryRepoCommitToRepository chains the current query on the "RepoCommitToRepository" edge. func (rcq *RepoCommitQuery) QueryRepoCommitToRepository() *RepositoryQuery { - query := &RepositoryQuery{config: rcq.config} + query := (&RepositoryClient{config: rcq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := rcq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (rcq *RepoCommitQuery) QueryRepoCommitToRepository() *RepositoryQuery { // First returns the first RepoCommit entity from the query. // Returns a *NotFoundError when no RepoCommit was found. func (rcq *RepoCommitQuery) First(ctx context.Context) (*RepoCommit, error) { - nodes, err := rcq.Limit(1).All(ctx) + nodes, err := rcq.Limit(1).All(setContextOp(ctx, rcq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (rcq *RepoCommitQuery) FirstX(ctx context.Context) *RepoCommit { // Returns a *NotFoundError when no RepoCommit ID was found. func (rcq *RepoCommitQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = rcq.Limit(1).IDs(ctx); err != nil { + if ids, err = rcq.Limit(1).IDs(setContextOp(ctx, rcq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (rcq *RepoCommitQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one RepoCommit entity is found. // Returns a *NotFoundError when no RepoCommit entities are found. func (rcq *RepoCommitQuery) Only(ctx context.Context) (*RepoCommit, error) { - nodes, err := rcq.Limit(2).All(ctx) + nodes, err := rcq.Limit(2).All(setContextOp(ctx, rcq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (rcq *RepoCommitQuery) OnlyX(ctx context.Context) *RepoCommit { // Returns a *NotFoundError when no entities are found. func (rcq *RepoCommitQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = rcq.Limit(2).IDs(ctx); err != nil { + if ids, err = rcq.Limit(2).IDs(setContextOp(ctx, rcq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (rcq *RepoCommitQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of RepoCommits. func (rcq *RepoCommitQuery) All(ctx context.Context) ([]*RepoCommit, error) { + ctx = setContextOp(ctx, rcq.ctx, "All") if err := rcq.prepareQuery(ctx); err != nil { return nil, err } - return rcq.sqlAll(ctx) + qr := querierAll[[]*RepoCommit, *RepoCommitQuery]() + return withInterceptors[[]*RepoCommit](ctx, rcq, qr, rcq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (rcq *RepoCommitQuery) AllX(ctx context.Context) []*RepoCommit { } // IDs executes the query and returns a list of RepoCommit IDs. 
-func (rcq *RepoCommitQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := rcq.Select(repocommit.FieldID).Scan(ctx, &ids); err != nil { +func (rcq *RepoCommitQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if rcq.ctx.Unique == nil && rcq.path != nil { + rcq.Unique(true) + } + ctx = setContextOp(ctx, rcq.ctx, "IDs") + if err = rcq.Select(repocommit.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (rcq *RepoCommitQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (rcq *RepoCommitQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, rcq.ctx, "Count") if err := rcq.prepareQuery(ctx); err != nil { return 0, err } - return rcq.sqlCount(ctx) + return withInterceptors[int](ctx, rcq, querierCount[*RepoCommitQuery](), rcq.inters) } // CountX is like Count, but panics if an error occurs. @@ -241,10 +245,15 @@ func (rcq *RepoCommitQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (rcq *RepoCommitQuery) Exist(ctx context.Context) (bool, error) { - if err := rcq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, rcq.ctx, "Exist") + switch _, err := rcq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return rcq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -264,22 +273,21 @@ func (rcq *RepoCommitQuery) Clone() *RepoCommitQuery { } return &RepoCommitQuery{ config: rcq.config, - limit: rcq.limit, - offset: rcq.offset, - order: append([]OrderFunc{}, rcq.order...), + ctx: rcq.ctx.Clone(), + order: append([]repocommit.OrderOption{}, rcq.order...), + inters: append([]Interceptor{}, rcq.inters...), predicates: append([]predicate.RepoCommit{}, rcq.predicates...), withRepoCommitToRepository: rcq.withRepoCommitToRepository.Clone(), // clone intermediate query. - sql: rcq.sql.Clone(), - path: rcq.path, - unique: rcq.unique, + sql: rcq.sql.Clone(), + path: rcq.path, } } // WithRepoCommitToRepository tells the query-builder to eager-load the nodes that are connected to // the "RepoCommitToRepository" edge. The optional arguments are used to configure the query builder of the edge. func (rcq *RepoCommitQuery) WithRepoCommitToRepository(opts ...func(*RepositoryQuery)) *RepoCommitQuery { - query := &RepositoryQuery{config: rcq.config} + query := (&RepositoryClient{config: rcq.config}).Query() for _, opt := range opts { opt(query) } @@ -301,17 +309,13 @@ func (rcq *RepoCommitQuery) WithRepoCommitToRepository(opts ...func(*RepositoryQ // GroupBy(repocommit.FieldRevision). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (rcq *RepoCommitQuery) GroupBy(field string, fields ...string) *RepoCommitGroupBy { - group := &RepoCommitGroupBy{config: rcq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := rcq.prepareQuery(ctx); err != nil { - return nil, err - } - return rcq.sqlQuery(ctx), nil - } - return group + rcq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &RepoCommitGroupBy{build: rcq} + grbuild.flds = &rcq.ctx.Fields + grbuild.label = repocommit.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -326,14 +330,31 @@ func (rcq *RepoCommitQuery) GroupBy(field string, fields ...string) *RepoCommitG // client.RepoCommit.Query(). // Select(repocommit.FieldRevision). // Scan(ctx, &v) -// func (rcq *RepoCommitQuery) Select(fields ...string) *RepoCommitSelect { - rcq.fields = append(rcq.fields, fields...) - return &RepoCommitSelect{RepoCommitQuery: rcq} + rcq.ctx.Fields = append(rcq.ctx.Fields, fields...) + sbuild := &RepoCommitSelect{RepoCommitQuery: rcq} + sbuild.label = repocommit.Label + sbuild.flds, sbuild.scan = &rcq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a RepoCommitSelect configured with the given aggregations. +func (rcq *RepoCommitQuery) Aggregate(fns ...AggregateFunc) *RepoCommitSelect { + return rcq.Select().Aggregate(fns...) } func (rcq *RepoCommitQuery) prepareQuery(ctx context.Context) error { - for _, f := range rcq.fields { + for _, inter := range rcq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, rcq); err != nil { + return err + } + } + } + for _, f := range rcq.ctx.Fields { if !repocommit.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (rcq *RepoCommitQuery) prepareQuery(ctx context.Context) error { return nil } -func (rcq *RepoCommitQuery) sqlAll(ctx context.Context) ([]*RepoCommit, error) { +func (rcq *RepoCommitQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RepoCommit, error) { var ( nodes = []*RepoCommit{} withFKs = rcq.withFKs @@ -363,92 +384,95 @@ func (rcq *RepoCommitQuery) sqlAll(ctx context.Context) ([]*RepoCommit, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, repocommit.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*RepoCommit).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &RepoCommit{config: rcq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(rcq.modifiers) > 0 { + _spec.Modifiers = rcq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, rcq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := rcq.withRepoCommitToRepository; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*RepoCommit) - for i := range nodes { - if nodes[i].repository_repository_to_repo_commit == nil { - continue - } - fk := *nodes[i].repository_repository_to_repo_commit - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(repository.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := rcq.loadRepoCommitToRepository(ctx, query, nodes, nil, + func(n *RepoCommit, e *Repository) { n.Edges.RepoCommitToRepository = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "repository_repository_to_repo_commit" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.RepoCommitToRepository = n - } + } + for i := range rcq.loadTotal { + if err := rcq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (rcq *RepoCommitQuery) sqlCount(ctx context.Context) (int, error) { - _spec := rcq.querySpec() - _spec.Node.Columns = rcq.fields - if len(rcq.fields) > 0 { - _spec.Unique = rcq.unique != nil && *rcq.unique +func (rcq *RepoCommitQuery) loadRepoCommitToRepository(ctx context.Context, query *RepositoryQuery, nodes []*RepoCommit, init func(*RepoCommit), assign func(*RepoCommit, *Repository)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*RepoCommit) + for i := range nodes { + if nodes[i].repository_repository_to_repo_commit == nil { + continue + } + fk := *nodes[i].repository_repository_to_repo_commit + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - return sqlgraph.CountNodes(ctx, rcq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(repository.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "repository_repository_to_repo_commit" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (rcq *RepoCommitQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := rcq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (rcq *RepoCommitQuery) sqlCount(ctx context.Context) (int, error) { + _spec := rcq.querySpec() + if len(rcq.modifiers) > 0 { + _spec.Modifiers = rcq.modifiers } - return n > 0, nil + _spec.Node.Columns = 
rcq.ctx.Fields + if len(rcq.ctx.Fields) > 0 { + _spec.Unique = rcq.ctx.Unique != nil && *rcq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, rcq.driver, _spec) } func (rcq *RepoCommitQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: repocommit.Table, - Columns: repocommit.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, - }, - From: rcq.sql, - Unique: true, - } - if unique := rcq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(repocommit.Table, repocommit.Columns, sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID)) + _spec.From = rcq.sql + if unique := rcq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if rcq.path != nil { + _spec.Unique = true } - if fields := rcq.fields; len(fields) > 0 { + if fields := rcq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, repocommit.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (rcq *RepoCommitQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := rcq.limit; limit != nil { + if limit := rcq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := rcq.offset; offset != nil { + if offset := rcq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := rcq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (rcq *RepoCommitQuery) querySpec() *sqlgraph.QuerySpec { func (rcq *RepoCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(rcq.driver.Dialect()) t1 := builder.Table(repocommit.Table) - columns := rcq.fields + columns := rcq.ctx.Fields if len(columns) == 0 { columns = repocommit.Columns } @@ -492,7 +516,7 @@ func (rcq *RepoCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = rcq.sql selector.Select(selector.Columns(columns...)...) } - if rcq.unique != nil && *rcq.unique { + if rcq.ctx.Unique != nil && *rcq.ctx.Unique { selector.Distinct() } for _, p := range rcq.predicates { @@ -501,12 +525,12 @@ func (rcq *RepoCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range rcq.order { p(selector) } - if offset := rcq.offset; offset != nil { + if offset := rcq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := rcq.limit; limit != nil { + if limit := rcq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (rcq *RepoCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { // RepoCommitGroupBy is the group-by builder for RepoCommit entities. type RepoCommitGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *RepoCommitQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (rcgb *RepoCommitGroupBy) Aggregate(fns ...AggregateFunc) *RepoCommitGroupB return rcgb } -// Scan applies the group-by query and scans the result into the given value. -func (rcgb *RepoCommitGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := rcgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. 
+func (rcgb *RepoCommitGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rcgb.build.ctx, "GroupBy") + if err := rcgb.build.prepareQuery(ctx); err != nil { return err } - rcgb.sql = query - return rcgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := rcgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(rcgb.fields) > 1 { - return nil, errors.New("ent: RepoCommitGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := rcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) StringsX(ctx context.Context) []string { - v, err := rcgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = rcgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) StringX(ctx context.Context) string { - v, err := rcgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(rcgb.fields) > 1 { - return nil, errors.New("ent: RepoCommitGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := rcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) IntsX(ctx context.Context) []int { - v, err := rcgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = rcgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*RepoCommitQuery, *RepoCommitGroupBy](ctx, rcgb.build, rcgb, rcgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) IntX(ctx context.Context) int { - v, err := rcgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (rcgb *RepoCommitGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(rcgb.fields) > 1 { - return nil, errors.New("ent: RepoCommitGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := rcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := rcgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = rcgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) Float64X(ctx context.Context) float64 { - v, err := rcgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(rcgb.fields) > 1 { - return nil, errors.New("ent: RepoCommitGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := rcgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) BoolsX(ctx context.Context) []bool { - v, err := rcgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rcgb *RepoCommitGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = rcgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (rcgb *RepoCommitGroupBy) BoolX(ctx context.Context) bool { - v, err := rcgb.Bool(ctx) - if err != nil { - panic(err) +func (rcgb *RepoCommitGroupBy) sqlScan(ctx context.Context, root *RepoCommitQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(rcgb.fns)) + for _, fn := range rcgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (rcgb *RepoCommitGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range rcgb.fields { - if !repocommit.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*rcgb.flds)+len(rcgb.fns)) + for _, f := range *rcgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := rcgb.sqlQuery() + selector.GroupBy(selector.Columns(*rcgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := rcgb.driver.Query(ctx, query, args, rows); err != nil { + if err := rcgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (rcgb *RepoCommitGroupBy) sqlQuery() *sql.Selector { - selector := rcgb.sql.Select() - aggregation := make([]string, 0, len(rcgb.fns)) - for _, fn := range rcgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(rcgb.fields)+len(rcgb.fns)) - for _, f := range rcgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(rcgb.fields...)...) -} - // RepoCommitSelect is the builder for selecting fields of RepoCommit entities. type RepoCommitSelect struct { *RepoCommitQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (rcs *RepoCommitSelect) Aggregate(fns ...AggregateFunc) *RepoCommitSelect { + rcs.fns = append(rcs.fns, fns...) + return rcs } // Scan applies the selector query and scans the result into the given value. -func (rcs *RepoCommitSelect) Scan(ctx context.Context, v interface{}) error { +func (rcs *RepoCommitSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rcs.ctx, "Select") if err := rcs.prepareQuery(ctx); err != nil { return err } - rcs.sql = rcs.RepoCommitQuery.sqlQuery(ctx) - return rcs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (rcs *RepoCommitSelect) ScanX(ctx context.Context, v interface{}) { - if err := rcs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) Strings(ctx context.Context) ([]string, error) { - if len(rcs.fields) > 1 { - return nil, errors.New("ent: RepoCommitSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := rcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (rcs *RepoCommitSelect) StringsX(ctx context.Context) []string { - v, err := rcs.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*RepoCommitQuery, *RepoCommitSelect](ctx, rcs.RepoCommitQuery, rcs, rcs.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = rcs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (rcs *RepoCommitSelect) StringX(ctx context.Context) string { - v, err := rcs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. 
It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) Ints(ctx context.Context) ([]int, error) { - if len(rcs.fields) > 1 { - return nil, errors.New("ent: RepoCommitSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := rcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (rcs *RepoCommitSelect) IntsX(ctx context.Context) []int { - v, err := rcs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = rcs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (rcs *RepoCommitSelect) IntX(ctx context.Context) int { - v, err := rcs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(rcs.fields) > 1 { - return nil, errors.New("ent: RepoCommitSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := rcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (rcs *RepoCommitSelect) Float64sX(ctx context.Context) []float64 { - v, err := rcs.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = rcs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (rcs *RepoCommitSelect) Float64X(ctx context.Context) float64 { - v, err := rcs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (rcs *RepoCommitSelect) Bools(ctx context.Context) ([]bool, error) { - if len(rcs.fields) > 1 { - return nil, errors.New("ent: RepoCommitSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := rcs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (rcs *RepoCommitSelect) BoolsX(ctx context.Context) []bool { - v, err := rcs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (rcs *RepoCommitSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = rcs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repocommit.Label} - default: - err = fmt.Errorf("ent: RepoCommitSelect.Bools returned %d results when one was expected", len(v)) +func (rcs *RepoCommitSelect) sqlScan(ctx context.Context, root *RepoCommitQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(rcs.fns)) + for _, fn := range rcs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (rcs *RepoCommitSelect) BoolX(ctx context.Context) bool { - v, err := rcs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*rcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (rcs *RepoCommitSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := rcs.sql.Query() + query, args := selector.Query() if err := rcs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/repocommit_update.go b/ent/repocommit_update.go index cc63b3e2..473b3d76 100755 --- a/ent/repocommit_update.go +++ b/ent/repocommit_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/predicate" "github.com/gen0cide/laforge/ent/repocommit" @@ -37,6 +38,14 @@ func (rcu *RepoCommitUpdate) SetRevision(i int) *RepoCommitUpdate { return rcu } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillableRevision(i *int) *RepoCommitUpdate { + if i != nil { + rcu.SetRevision(*i) + } + return rcu +} + // AddRevision adds i to the "revision" field. func (rcu *RepoCommitUpdate) AddRevision(i int) *RepoCommitUpdate { rcu.mutation.AddRevision(i) @@ -49,42 +58,96 @@ func (rcu *RepoCommitUpdate) SetHash(s string) *RepoCommitUpdate { return rcu } +// SetNillableHash sets the "hash" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillableHash(s *string) *RepoCommitUpdate { + if s != nil { + rcu.SetHash(*s) + } + return rcu +} + // SetAuthor sets the "author" field. func (rcu *RepoCommitUpdate) SetAuthor(o object.Signature) *RepoCommitUpdate { rcu.mutation.SetAuthor(o) return rcu } +// SetNillableAuthor sets the "author" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillableAuthor(o *object.Signature) *RepoCommitUpdate { + if o != nil { + rcu.SetAuthor(*o) + } + return rcu +} + // SetCommitter sets the "committer" field. func (rcu *RepoCommitUpdate) SetCommitter(o object.Signature) *RepoCommitUpdate { rcu.mutation.SetCommitter(o) return rcu } +// SetNillableCommitter sets the "committer" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillableCommitter(o *object.Signature) *RepoCommitUpdate { + if o != nil { + rcu.SetCommitter(*o) + } + return rcu +} + // SetPgpSignature sets the "pgp_signature" field. 
func (rcu *RepoCommitUpdate) SetPgpSignature(s string) *RepoCommitUpdate { rcu.mutation.SetPgpSignature(s) return rcu } +// SetNillablePgpSignature sets the "pgp_signature" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillablePgpSignature(s *string) *RepoCommitUpdate { + if s != nil { + rcu.SetPgpSignature(*s) + } + return rcu +} + // SetMessage sets the "message" field. func (rcu *RepoCommitUpdate) SetMessage(s string) *RepoCommitUpdate { rcu.mutation.SetMessage(s) return rcu } +// SetNillableMessage sets the "message" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillableMessage(s *string) *RepoCommitUpdate { + if s != nil { + rcu.SetMessage(*s) + } + return rcu +} + // SetTreeHash sets the "tree_hash" field. func (rcu *RepoCommitUpdate) SetTreeHash(s string) *RepoCommitUpdate { rcu.mutation.SetTreeHash(s) return rcu } +// SetNillableTreeHash sets the "tree_hash" field if the given value is not nil. +func (rcu *RepoCommitUpdate) SetNillableTreeHash(s *string) *RepoCommitUpdate { + if s != nil { + rcu.SetTreeHash(*s) + } + return rcu +} + // SetParentHashes sets the "parent_hashes" field. func (rcu *RepoCommitUpdate) SetParentHashes(s []string) *RepoCommitUpdate { rcu.mutation.SetParentHashes(s) return rcu } +// AppendParentHashes appends s to the "parent_hashes" field. +func (rcu *RepoCommitUpdate) AppendParentHashes(s []string) *RepoCommitUpdate { + rcu.mutation.AppendParentHashes(s) + return rcu +} + // SetRepoCommitToRepositoryID sets the "RepoCommitToRepository" edge to the Repository entity by ID. func (rcu *RepoCommitUpdate) SetRepoCommitToRepositoryID(id uuid.UUID) *RepoCommitUpdate { rcu.mutation.SetRepoCommitToRepositoryID(id) @@ -117,34 +180,7 @@ func (rcu *RepoCommitUpdate) ClearRepoCommitToRepository() *RepoCommitUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (rcu *RepoCommitUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(rcu.hooks) == 0 { - affected, err = rcu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepoCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - rcu.mutation = mutation - affected, err = rcu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(rcu.hooks) - 1; i >= 0; i-- { - if rcu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = rcu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, rcu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, rcu.sqlSave, rcu.mutation, rcu.hooks) } // SaveX is like Save, but panics if an error occurs. 
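// Usage sketch (illustrative, not part of the generated diff above): the new
// SetNillable* setters and AppendParentHashes let callers pass optional values
// straight through, and Save now funnels everything via withHooks. The package
// name, helper name, client wiring, and predicate choice below are assumptions
// made only for this example.
package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/repocommit"
)

// amendCommits sets the message only when one was supplied (nil means "leave as-is")
// and records an extra parent hash, which sqlSave turns into a sqljson.Append modifier.
func amendCommits(ctx context.Context, client *ent.Client, hash string, msg *string, parent string) (int, error) {
	return client.RepoCommit.
		Update().
		Where(repocommit.HashEQ(hash)).
		SetNillableMessage(msg).
		AppendParentHashes([]string{parent}).
		Save(ctx)
}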
@@ -170,16 +206,7 @@ func (rcu *RepoCommitUpdate) ExecX(ctx context.Context) { } func (rcu *RepoCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: repocommit.Table, - Columns: repocommit.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(repocommit.Table, repocommit.Columns, sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID)) if ps := rcu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -188,66 +215,35 @@ func (rcu *RepoCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := rcu.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: repocommit.FieldRevision, - }) + _spec.SetField(repocommit.FieldRevision, field.TypeInt, value) } if value, ok := rcu.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: repocommit.FieldRevision, - }) + _spec.AddField(repocommit.FieldRevision, field.TypeInt, value) } if value, ok := rcu.mutation.Hash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldHash, - }) + _spec.SetField(repocommit.FieldHash, field.TypeString, value) } if value, ok := rcu.mutation.Author(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldAuthor, - }) + _spec.SetField(repocommit.FieldAuthor, field.TypeJSON, value) } if value, ok := rcu.mutation.Committer(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldCommitter, - }) + _spec.SetField(repocommit.FieldCommitter, field.TypeJSON, value) } if value, ok := rcu.mutation.PgpSignature(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldPgpSignature, - }) + _spec.SetField(repocommit.FieldPgpSignature, field.TypeString, value) } if value, ok := rcu.mutation.Message(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldMessage, - }) + _spec.SetField(repocommit.FieldMessage, field.TypeString, value) } if value, ok := rcu.mutation.TreeHash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldTreeHash, - }) + _spec.SetField(repocommit.FieldTreeHash, field.TypeString, value) } if value, ok := rcu.mutation.ParentHashes(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldParentHashes, + _spec.SetField(repocommit.FieldParentHashes, field.TypeJSON, value) + } + if value, ok := rcu.mutation.AppendedParentHashes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, repocommit.FieldParentHashes, value) }) } if rcu.mutation.RepoCommitToRepositoryCleared() { @@ -258,10 +254,7 @@ func (rcu *RepoCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{repocommit.RepoCommitToRepositoryColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -274,10 +267,7 @@ func (rcu *RepoCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{repocommit.RepoCommitToRepositoryColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -289,10 +279,11 @@ func (rcu *RepoCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{repocommit.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + rcu.mutation.done = true return n, nil } @@ -311,6 +302,14 @@ func (rcuo *RepoCommitUpdateOne) SetRevision(i int) *RepoCommitUpdateOne { return rcuo } +// SetNillableRevision sets the "revision" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillableRevision(i *int) *RepoCommitUpdateOne { + if i != nil { + rcuo.SetRevision(*i) + } + return rcuo +} + // AddRevision adds i to the "revision" field. func (rcuo *RepoCommitUpdateOne) AddRevision(i int) *RepoCommitUpdateOne { rcuo.mutation.AddRevision(i) @@ -323,42 +322,96 @@ func (rcuo *RepoCommitUpdateOne) SetHash(s string) *RepoCommitUpdateOne { return rcuo } +// SetNillableHash sets the "hash" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillableHash(s *string) *RepoCommitUpdateOne { + if s != nil { + rcuo.SetHash(*s) + } + return rcuo +} + // SetAuthor sets the "author" field. func (rcuo *RepoCommitUpdateOne) SetAuthor(o object.Signature) *RepoCommitUpdateOne { rcuo.mutation.SetAuthor(o) return rcuo } +// SetNillableAuthor sets the "author" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillableAuthor(o *object.Signature) *RepoCommitUpdateOne { + if o != nil { + rcuo.SetAuthor(*o) + } + return rcuo +} + // SetCommitter sets the "committer" field. func (rcuo *RepoCommitUpdateOne) SetCommitter(o object.Signature) *RepoCommitUpdateOne { rcuo.mutation.SetCommitter(o) return rcuo } +// SetNillableCommitter sets the "committer" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillableCommitter(o *object.Signature) *RepoCommitUpdateOne { + if o != nil { + rcuo.SetCommitter(*o) + } + return rcuo +} + // SetPgpSignature sets the "pgp_signature" field. func (rcuo *RepoCommitUpdateOne) SetPgpSignature(s string) *RepoCommitUpdateOne { rcuo.mutation.SetPgpSignature(s) return rcuo } +// SetNillablePgpSignature sets the "pgp_signature" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillablePgpSignature(s *string) *RepoCommitUpdateOne { + if s != nil { + rcuo.SetPgpSignature(*s) + } + return rcuo +} + // SetMessage sets the "message" field. func (rcuo *RepoCommitUpdateOne) SetMessage(s string) *RepoCommitUpdateOne { rcuo.mutation.SetMessage(s) return rcuo } +// SetNillableMessage sets the "message" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillableMessage(s *string) *RepoCommitUpdateOne { + if s != nil { + rcuo.SetMessage(*s) + } + return rcuo +} + // SetTreeHash sets the "tree_hash" field. 
func (rcuo *RepoCommitUpdateOne) SetTreeHash(s string) *RepoCommitUpdateOne { rcuo.mutation.SetTreeHash(s) return rcuo } +// SetNillableTreeHash sets the "tree_hash" field if the given value is not nil. +func (rcuo *RepoCommitUpdateOne) SetNillableTreeHash(s *string) *RepoCommitUpdateOne { + if s != nil { + rcuo.SetTreeHash(*s) + } + return rcuo +} + // SetParentHashes sets the "parent_hashes" field. func (rcuo *RepoCommitUpdateOne) SetParentHashes(s []string) *RepoCommitUpdateOne { rcuo.mutation.SetParentHashes(s) return rcuo } +// AppendParentHashes appends s to the "parent_hashes" field. +func (rcuo *RepoCommitUpdateOne) AppendParentHashes(s []string) *RepoCommitUpdateOne { + rcuo.mutation.AppendParentHashes(s) + return rcuo +} + // SetRepoCommitToRepositoryID sets the "RepoCommitToRepository" edge to the Repository entity by ID. func (rcuo *RepoCommitUpdateOne) SetRepoCommitToRepositoryID(id uuid.UUID) *RepoCommitUpdateOne { rcuo.mutation.SetRepoCommitToRepositoryID(id) @@ -389,6 +442,12 @@ func (rcuo *RepoCommitUpdateOne) ClearRepoCommitToRepository() *RepoCommitUpdate return rcuo } +// Where appends a list predicates to the RepoCommitUpdate builder. +func (rcuo *RepoCommitUpdateOne) Where(ps ...predicate.RepoCommit) *RepoCommitUpdateOne { + rcuo.mutation.Where(ps...) + return rcuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (rcuo *RepoCommitUpdateOne) Select(field string, fields ...string) *RepoCommitUpdateOne { @@ -398,34 +457,7 @@ func (rcuo *RepoCommitUpdateOne) Select(field string, fields ...string) *RepoCom // Save executes the query and returns the updated RepoCommit entity. func (rcuo *RepoCommitUpdateOne) Save(ctx context.Context) (*RepoCommit, error) { - var ( - err error - node *RepoCommit - ) - if len(rcuo.hooks) == 0 { - node, err = rcuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepoCommitMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - rcuo.mutation = mutation - node, err = rcuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(rcuo.hooks) - 1; i >= 0; i-- { - if rcuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = rcuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, rcuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, rcuo.sqlSave, rcuo.mutation, rcuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
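// Illustrative sketch only, assuming the same package and imports as the previous
// example plus github.com/google/uuid (the helper name and values are made up):
// the Where method added to RepoCommitUpdateOne allows a guarded single-row update.
// If the predicate filters the row out, sqlSave surfaces *ent.NotFoundError rather
// than silently updating.
func bumpTreeHash(ctx context.Context, client *ent.Client, id uuid.UUID, expectedRev int, newTree string) (*ent.RepoCommit, error) {
	return client.RepoCommit.
		UpdateOneID(id).
		Where(repocommit.RevisionEQ(expectedRev)).
		SetTreeHash(newTree).
		Save(ctx)
}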
@@ -451,16 +483,7 @@ func (rcuo *RepoCommitUpdateOne) ExecX(ctx context.Context) { } func (rcuo *RepoCommitUpdateOne) sqlSave(ctx context.Context) (_node *RepoCommit, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: repocommit.Table, - Columns: repocommit.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(repocommit.Table, repocommit.Columns, sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID)) id, ok := rcuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "RepoCommit.id" for update`)} @@ -486,66 +509,35 @@ func (rcuo *RepoCommitUpdateOne) sqlSave(ctx context.Context) (_node *RepoCommit } } if value, ok := rcuo.mutation.Revision(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: repocommit.FieldRevision, - }) + _spec.SetField(repocommit.FieldRevision, field.TypeInt, value) } if value, ok := rcuo.mutation.AddedRevision(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: repocommit.FieldRevision, - }) + _spec.AddField(repocommit.FieldRevision, field.TypeInt, value) } if value, ok := rcuo.mutation.Hash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldHash, - }) + _spec.SetField(repocommit.FieldHash, field.TypeString, value) } if value, ok := rcuo.mutation.Author(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldAuthor, - }) + _spec.SetField(repocommit.FieldAuthor, field.TypeJSON, value) } if value, ok := rcuo.mutation.Committer(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldCommitter, - }) + _spec.SetField(repocommit.FieldCommitter, field.TypeJSON, value) } if value, ok := rcuo.mutation.PgpSignature(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldPgpSignature, - }) + _spec.SetField(repocommit.FieldPgpSignature, field.TypeString, value) } if value, ok := rcuo.mutation.Message(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldMessage, - }) + _spec.SetField(repocommit.FieldMessage, field.TypeString, value) } if value, ok := rcuo.mutation.TreeHash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repocommit.FieldTreeHash, - }) + _spec.SetField(repocommit.FieldTreeHash, field.TypeString, value) } if value, ok := rcuo.mutation.ParentHashes(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: repocommit.FieldParentHashes, + _spec.SetField(repocommit.FieldParentHashes, field.TypeJSON, value) + } + if value, ok := rcuo.mutation.AppendedParentHashes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, repocommit.FieldParentHashes, value) }) } if rcuo.mutation.RepoCommitToRepositoryCleared() { @@ -556,10 +548,7 @@ func (rcuo *RepoCommitUpdateOne) sqlSave(ctx context.Context) (_node *RepoCommit Columns: []string{repocommit.RepoCommitToRepositoryColumn}, Bidi: 
false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -572,10 +561,7 @@ func (rcuo *RepoCommitUpdateOne) sqlSave(ctx context.Context) (_node *RepoCommit Columns: []string{repocommit.RepoCommitToRepositoryColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -590,9 +576,10 @@ func (rcuo *RepoCommitUpdateOne) sqlSave(ctx context.Context) (_node *RepoCommit if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{repocommit.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + rcuo.mutation.done = true return _node, nil } diff --git a/ent/repository.go b/ent/repository.go index 6b81bd83..66b72c4f 100755 --- a/ent/repository.go +++ b/ent/repository.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/repository" "github.com/google/uuid" @@ -28,13 +29,14 @@ type Repository struct { // The values are being populated by the RepositoryQuery when eager-loading is set. Edges RepositoryEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // RepositoryToEnvironment holds the value of the RepositoryToEnvironment edge. HCLRepositoryToEnvironment []*Environment `json:"RepositoryToEnvironment,omitempty"` // RepositoryToRepoCommit holds the value of the RepositoryToRepoCommit edge. HCLRepositoryToRepoCommit []*RepoCommit `json:"RepositoryToRepoCommit,omitempty"` - // - + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ + selectValues sql.SelectValues } // RepositoryEdges holds the relations/edges for other nodes in the graph. @@ -46,6 +48,11 @@ type RepositoryEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int + + namedRepositoryToEnvironment map[string][]*Environment + namedRepositoryToRepoCommit map[string][]*RepoCommit } // RepositoryToEnvironmentOrErr returns the RepositoryToEnvironment value or an error if the edge @@ -67,8 +74,8 @@ func (e RepositoryEdges) RepositoryToRepoCommitOrErr() ([]*RepoCommit, error) { } // scanValues returns the types for scanning values from sql.Rows. 
-func (*Repository) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Repository) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case repository.FieldRepoURL, repository.FieldBranchName, repository.FieldEnviromentFilepath, repository.FieldFolderPath: @@ -76,7 +83,7 @@ func (*Repository) scanValues(columns []string) ([]interface{}, error) { case repository.FieldID: values[i] = new(uuid.UUID) default: - return nil, fmt.Errorf("unexpected column %q for type Repository", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -84,7 +91,7 @@ func (*Repository) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Repository fields. -func (r *Repository) assignValues(columns []string, values []interface{}) error { +func (r *Repository) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -120,36 +127,44 @@ func (r *Repository) assignValues(columns []string, values []interface{}) error } else if value.Valid { r.FolderPath = value.String } + default: + r.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Repository. +// This includes values selected through modifiers, order, etc. +func (r *Repository) Value(name string) (ent.Value, error) { + return r.selectValues.Get(name) +} + // QueryRepositoryToEnvironment queries the "RepositoryToEnvironment" edge of the Repository entity. func (r *Repository) QueryRepositoryToEnvironment() *EnvironmentQuery { - return (&RepositoryClient{config: r.config}).QueryRepositoryToEnvironment(r) + return NewRepositoryClient(r.config).QueryRepositoryToEnvironment(r) } // QueryRepositoryToRepoCommit queries the "RepositoryToRepoCommit" edge of the Repository entity. func (r *Repository) QueryRepositoryToRepoCommit() *RepoCommitQuery { - return (&RepositoryClient{config: r.config}).QueryRepositoryToRepoCommit(r) + return NewRepositoryClient(r.config).QueryRepositoryToRepoCommit(r) } // Update returns a builder for updating this Repository. // Note that you need to call Repository.Unwrap() before calling this method if this Repository // was returned from a transaction, and the transaction was committed or rolled back. func (r *Repository) Update() *RepositoryUpdateOne { - return (&RepositoryClient{config: r.config}).UpdateOne(r) + return NewRepositoryClient(r.config).UpdateOne(r) } // Unwrap unwraps the Repository entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (r *Repository) Unwrap() *Repository { - tx, ok := r.config.driver.(*txDriver) + _tx, ok := r.config.driver.(*txDriver) if !ok { panic("ent: Repository is not a transactional entity") } - r.config.driver = tx.drv + r.config.driver = _tx.drv return r } @@ -157,24 +172,69 @@ func (r *Repository) Unwrap() *Repository { func (r *Repository) String() string { var builder strings.Builder builder.WriteString("Repository(") - builder.WriteString(fmt.Sprintf("id=%v", r.ID)) - builder.WriteString(", repo_url=") + builder.WriteString(fmt.Sprintf("id=%v, ", r.ID)) + builder.WriteString("repo_url=") builder.WriteString(r.RepoURL) - builder.WriteString(", branch_name=") + builder.WriteString(", ") + builder.WriteString("branch_name=") builder.WriteString(r.BranchName) - builder.WriteString(", enviroment_filepath=") + builder.WriteString(", ") + builder.WriteString("enviroment_filepath=") builder.WriteString(r.EnviromentFilepath) - builder.WriteString(", folder_path=") + builder.WriteString(", ") + builder.WriteString("folder_path=") builder.WriteString(r.FolderPath) builder.WriteByte(')') return builder.String() } -// Repositories is a parsable slice of Repository. -type Repositories []*Repository +// NamedRepositoryToEnvironment returns the RepositoryToEnvironment named value or an error if the edge was not +// loaded in eager-loading with this name. +func (r *Repository) NamedRepositoryToEnvironment(name string) ([]*Environment, error) { + if r.Edges.namedRepositoryToEnvironment == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := r.Edges.namedRepositoryToEnvironment[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (r *Repository) appendNamedRepositoryToEnvironment(name string, edges ...*Environment) { + if r.Edges.namedRepositoryToEnvironment == nil { + r.Edges.namedRepositoryToEnvironment = make(map[string][]*Environment) + } + if len(edges) == 0 { + r.Edges.namedRepositoryToEnvironment[name] = []*Environment{} + } else { + r.Edges.namedRepositoryToEnvironment[name] = append(r.Edges.namedRepositoryToEnvironment[name], edges...) + } +} + +// NamedRepositoryToRepoCommit returns the RepositoryToRepoCommit named value or an error if the edge was not +// loaded in eager-loading with this name. +func (r *Repository) NamedRepositoryToRepoCommit(name string) ([]*RepoCommit, error) { + if r.Edges.namedRepositoryToRepoCommit == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := r.Edges.namedRepositoryToRepoCommit[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (r Repositories) config(cfg config) { - for _i := range r { - r[_i].config = cfg +func (r *Repository) appendNamedRepositoryToRepoCommit(name string, edges ...*RepoCommit) { + if r.Edges.namedRepositoryToRepoCommit == nil { + r.Edges.namedRepositoryToRepoCommit = make(map[string][]*RepoCommit) + } + if len(edges) == 0 { + r.Edges.namedRepositoryToRepoCommit[name] = []*RepoCommit{} + } else { + r.Edges.namedRepositoryToRepoCommit[name] = append(r.Edges.namedRepositoryToRepoCommit[name], edges...) } } + +// Repositories is a parsable slice of Repository. +type Repositories []*Repository diff --git a/ent/repository/repository.go b/ent/repository/repository.go index c71fbc27..bb8e57a4 100755 --- a/ent/repository/repository.go +++ b/ent/repository/repository.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package repository import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -72,3 +74,73 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Repository queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByRepoURL orders the results by the repo_url field. +func ByRepoURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRepoURL, opts...).ToFunc() +} + +// ByBranchName orders the results by the branch_name field. +func ByBranchName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBranchName, opts...).ToFunc() +} + +// ByEnviromentFilepath orders the results by the enviroment_filepath field. +func ByEnviromentFilepath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnviromentFilepath, opts...).ToFunc() +} + +// ByFolderPath orders the results by the folder_path field. +func ByFolderPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFolderPath, opts...).ToFunc() +} + +// ByRepositoryToEnvironmentCount orders the results by RepositoryToEnvironment count. +func ByRepositoryToEnvironmentCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRepositoryToEnvironmentStep(), opts...) + } +} + +// ByRepositoryToEnvironment orders the results by RepositoryToEnvironment terms. +func ByRepositoryToEnvironment(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRepositoryToEnvironmentStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByRepositoryToRepoCommitCount orders the results by RepositoryToRepoCommit count. +func ByRepositoryToRepoCommitCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRepositoryToRepoCommitStep(), opts...) + } +} + +// ByRepositoryToRepoCommit orders the results by RepositoryToRepoCommit terms. +func ByRepositoryToRepoCommit(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRepositoryToRepoCommitStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newRepositoryToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RepositoryToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, RepositoryToEnvironmentTable, RepositoryToEnvironmentPrimaryKey...), + ) +} +func newRepositoryToRepoCommitStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RepositoryToRepoCommitInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RepositoryToRepoCommitTable, RepositoryToRepoCommitColumn), + ) +} diff --git a/ent/repository/where.go b/ent/repository/where.go index 4bde3329..467a42bf 100755 --- a/ent/repository/where.go +++ b/ent/repository/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package repository @@ -11,557 +11,327 @@ import ( // ID filters vertices based on their ID field. 
func ID(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Repository(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Repository(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Repository(sql.FieldLTE(FieldID, id)) } // RepoURL applies equality check predicate on the "repo_url" field. It's identical to RepoURLEQ. func RepoURL(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldRepoURL, v)) } // BranchName applies equality check predicate on the "branch_name" field. It's identical to BranchNameEQ. func BranchName(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldBranchName, v)) } // EnviromentFilepath applies equality check predicate on the "enviroment_filepath" field. It's identical to EnviromentFilepathEQ. 
func EnviromentFilepath(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldEnviromentFilepath, v)) } // FolderPath applies equality check predicate on the "folder_path" field. It's identical to FolderPathEQ. func FolderPath(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldFolderPath, v)) } // RepoURLEQ applies the EQ predicate on the "repo_url" field. func RepoURLEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldRepoURL, v)) } // RepoURLNEQ applies the NEQ predicate on the "repo_url" field. func RepoURLNEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldNEQ(FieldRepoURL, v)) } // RepoURLIn applies the In predicate on the "repo_url" field. func RepoURLIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldRepoURL), v...)) - }) + return predicate.Repository(sql.FieldIn(FieldRepoURL, vs...)) } // RepoURLNotIn applies the NotIn predicate on the "repo_url" field. func RepoURLNotIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldRepoURL), v...)) - }) + return predicate.Repository(sql.FieldNotIn(FieldRepoURL, vs...)) } // RepoURLGT applies the GT predicate on the "repo_url" field. func RepoURLGT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldGT(FieldRepoURL, v)) } // RepoURLGTE applies the GTE predicate on the "repo_url" field. func RepoURLGTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldGTE(FieldRepoURL, v)) } // RepoURLLT applies the LT predicate on the "repo_url" field. func RepoURLLT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldLT(FieldRepoURL, v)) } // RepoURLLTE applies the LTE predicate on the "repo_url" field. func RepoURLLTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldLTE(FieldRepoURL, v)) } // RepoURLContains applies the Contains predicate on the "repo_url" field. 
func RepoURLContains(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldContains(FieldRepoURL, v)) } // RepoURLHasPrefix applies the HasPrefix predicate on the "repo_url" field. func RepoURLHasPrefix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldHasPrefix(FieldRepoURL, v)) } // RepoURLHasSuffix applies the HasSuffix predicate on the "repo_url" field. func RepoURLHasSuffix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldHasSuffix(FieldRepoURL, v)) } // RepoURLEqualFold applies the EqualFold predicate on the "repo_url" field. func RepoURLEqualFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldEqualFold(FieldRepoURL, v)) } // RepoURLContainsFold applies the ContainsFold predicate on the "repo_url" field. func RepoURLContainsFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldRepoURL), v)) - }) + return predicate.Repository(sql.FieldContainsFold(FieldRepoURL, v)) } // BranchNameEQ applies the EQ predicate on the "branch_name" field. func BranchNameEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldBranchName, v)) } // BranchNameNEQ applies the NEQ predicate on the "branch_name" field. func BranchNameNEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldNEQ(FieldBranchName, v)) } // BranchNameIn applies the In predicate on the "branch_name" field. func BranchNameIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldBranchName), v...)) - }) + return predicate.Repository(sql.FieldIn(FieldBranchName, vs...)) } // BranchNameNotIn applies the NotIn predicate on the "branch_name" field. func BranchNameNotIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldBranchName), v...)) - }) + return predicate.Repository(sql.FieldNotIn(FieldBranchName, vs...)) } // BranchNameGT applies the GT predicate on the "branch_name" field. func BranchNameGT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldGT(FieldBranchName, v)) } // BranchNameGTE applies the GTE predicate on the "branch_name" field. 
func BranchNameGTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldGTE(FieldBranchName, v)) } // BranchNameLT applies the LT predicate on the "branch_name" field. func BranchNameLT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldLT(FieldBranchName, v)) } // BranchNameLTE applies the LTE predicate on the "branch_name" field. func BranchNameLTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldLTE(FieldBranchName, v)) } // BranchNameContains applies the Contains predicate on the "branch_name" field. func BranchNameContains(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldContains(FieldBranchName, v)) } // BranchNameHasPrefix applies the HasPrefix predicate on the "branch_name" field. func BranchNameHasPrefix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldHasPrefix(FieldBranchName, v)) } // BranchNameHasSuffix applies the HasSuffix predicate on the "branch_name" field. func BranchNameHasSuffix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldHasSuffix(FieldBranchName, v)) } // BranchNameEqualFold applies the EqualFold predicate on the "branch_name" field. func BranchNameEqualFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldEqualFold(FieldBranchName, v)) } // BranchNameContainsFold applies the ContainsFold predicate on the "branch_name" field. func BranchNameContainsFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldBranchName), v)) - }) + return predicate.Repository(sql.FieldContainsFold(FieldBranchName, v)) } // EnviromentFilepathEQ applies the EQ predicate on the "enviroment_filepath" field. func EnviromentFilepathEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldEnviromentFilepath, v)) } // EnviromentFilepathNEQ applies the NEQ predicate on the "enviroment_filepath" field. func EnviromentFilepathNEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldNEQ(FieldEnviromentFilepath, v)) } // EnviromentFilepathIn applies the In predicate on the "enviroment_filepath" field. func EnviromentFilepathIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEnviromentFilepath), v...)) - }) + return predicate.Repository(sql.FieldIn(FieldEnviromentFilepath, vs...)) } // EnviromentFilepathNotIn applies the NotIn predicate on the "enviroment_filepath" field. func EnviromentFilepathNotIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEnviromentFilepath), v...)) - }) + return predicate.Repository(sql.FieldNotIn(FieldEnviromentFilepath, vs...)) } // EnviromentFilepathGT applies the GT predicate on the "enviroment_filepath" field. func EnviromentFilepathGT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldGT(FieldEnviromentFilepath, v)) } // EnviromentFilepathGTE applies the GTE predicate on the "enviroment_filepath" field. func EnviromentFilepathGTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldGTE(FieldEnviromentFilepath, v)) } // EnviromentFilepathLT applies the LT predicate on the "enviroment_filepath" field. func EnviromentFilepathLT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldLT(FieldEnviromentFilepath, v)) } // EnviromentFilepathLTE applies the LTE predicate on the "enviroment_filepath" field. func EnviromentFilepathLTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldLTE(FieldEnviromentFilepath, v)) } // EnviromentFilepathContains applies the Contains predicate on the "enviroment_filepath" field. func EnviromentFilepathContains(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldContains(FieldEnviromentFilepath, v)) } // EnviromentFilepathHasPrefix applies the HasPrefix predicate on the "enviroment_filepath" field. func EnviromentFilepathHasPrefix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldHasPrefix(FieldEnviromentFilepath, v)) } // EnviromentFilepathHasSuffix applies the HasSuffix predicate on the "enviroment_filepath" field. func EnviromentFilepathHasSuffix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldHasSuffix(FieldEnviromentFilepath, v)) } // EnviromentFilepathEqualFold applies the EqualFold predicate on the "enviroment_filepath" field. 
func EnviromentFilepathEqualFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldEqualFold(FieldEnviromentFilepath, v)) } // EnviromentFilepathContainsFold applies the ContainsFold predicate on the "enviroment_filepath" field. func EnviromentFilepathContainsFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldEnviromentFilepath), v)) - }) + return predicate.Repository(sql.FieldContainsFold(FieldEnviromentFilepath, v)) } // FolderPathEQ applies the EQ predicate on the "folder_path" field. func FolderPathEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldEQ(FieldFolderPath, v)) } // FolderPathNEQ applies the NEQ predicate on the "folder_path" field. func FolderPathNEQ(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldNEQ(FieldFolderPath, v)) } // FolderPathIn applies the In predicate on the "folder_path" field. func FolderPathIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldFolderPath), v...)) - }) + return predicate.Repository(sql.FieldIn(FieldFolderPath, vs...)) } // FolderPathNotIn applies the NotIn predicate on the "folder_path" field. func FolderPathNotIn(vs ...string) predicate.Repository { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Repository(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldFolderPath), v...)) - }) + return predicate.Repository(sql.FieldNotIn(FieldFolderPath, vs...)) } // FolderPathGT applies the GT predicate on the "folder_path" field. func FolderPathGT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldGT(FieldFolderPath, v)) } // FolderPathGTE applies the GTE predicate on the "folder_path" field. func FolderPathGTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldGTE(FieldFolderPath, v)) } // FolderPathLT applies the LT predicate on the "folder_path" field. func FolderPathLT(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldLT(FieldFolderPath, v)) } // FolderPathLTE applies the LTE predicate on the "folder_path" field. 
func FolderPathLTE(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldLTE(FieldFolderPath, v)) } // FolderPathContains applies the Contains predicate on the "folder_path" field. func FolderPathContains(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldContains(FieldFolderPath, v)) } // FolderPathHasPrefix applies the HasPrefix predicate on the "folder_path" field. func FolderPathHasPrefix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldHasPrefix(FieldFolderPath, v)) } // FolderPathHasSuffix applies the HasSuffix predicate on the "folder_path" field. func FolderPathHasSuffix(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldHasSuffix(FieldFolderPath, v)) } // FolderPathEqualFold applies the EqualFold predicate on the "folder_path" field. func FolderPathEqualFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldEqualFold(FieldFolderPath, v)) } // FolderPathContainsFold applies the ContainsFold predicate on the "folder_path" field. func FolderPathContainsFold(v string) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldFolderPath), v)) - }) + return predicate.Repository(sql.FieldContainsFold(FieldFolderPath, v)) } // HasRepositoryToEnvironment applies the HasEdge predicate on the "RepositoryToEnvironment" edge. @@ -569,7 +339,6 @@ func HasRepositoryToEnvironment() predicate.Repository { return predicate.Repository(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(RepositoryToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, false, RepositoryToEnvironmentTable, RepositoryToEnvironmentPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -579,11 +348,7 @@ func HasRepositoryToEnvironment() predicate.Repository { // HasRepositoryToEnvironmentWith applies the HasEdge predicate on the "RepositoryToEnvironment" edge with a given conditions (other predicates). 
func HasRepositoryToEnvironmentWith(preds ...predicate.Environment) predicate.Repository { return predicate.Repository(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(RepositoryToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, RepositoryToEnvironmentTable, RepositoryToEnvironmentPrimaryKey...), - ) + step := newRepositoryToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -597,7 +362,6 @@ func HasRepositoryToRepoCommit() predicate.Repository { return predicate.Repository(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(RepositoryToRepoCommitTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, RepositoryToRepoCommitTable, RepositoryToRepoCommitColumn), ) sqlgraph.HasNeighbors(s, step) @@ -607,11 +371,7 @@ func HasRepositoryToRepoCommit() predicate.Repository { // HasRepositoryToRepoCommitWith applies the HasEdge predicate on the "RepositoryToRepoCommit" edge with a given conditions (other predicates). func HasRepositoryToRepoCommitWith(preds ...predicate.RepoCommit) predicate.Repository { return predicate.Repository(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(RepositoryToRepoCommitInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, RepositoryToRepoCommitTable, RepositoryToRepoCommitColumn), - ) + step := newRepositoryToRepoCommitStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -622,32 +382,15 @@ func HasRepositoryToRepoCommitWith(preds ...predicate.RepoCommit) predicate.Repo // And groups predicates with the AND operator between them. func And(predicates ...predicate.Repository) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Repository(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Repository) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Repository(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Repository) predicate.Repository { - return predicate.Repository(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Repository(sql.NotPredicates(p)) } diff --git a/ent/repository_create.go b/ent/repository_create.go index 6a0339b8..79ccb4bc 100755 --- a/ent/repository_create.go +++ b/ent/repository_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -113,44 +113,8 @@ func (rc *RepositoryCreate) Mutation() *RepositoryMutation { // Save creates the Repository in the database. 
func (rc *RepositoryCreate) Save(ctx context.Context) (*Repository, error) { - var ( - err error - node *Repository - ) rc.defaults() - if len(rc.hooks) == 0 { - if err = rc.check(); err != nil { - return nil, err - } - node, err = rc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepositoryMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = rc.check(); err != nil { - return nil, err - } - rc.mutation = mutation - if node, err = rc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(rc.hooks) - 1; i >= 0; i-- { - if rc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = rc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, rc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, rc.sqlSave, rc.mutation, rc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -209,10 +173,13 @@ func (rc *RepositoryCreate) check() error { } func (rc *RepositoryCreate) sqlSave(ctx context.Context) (*Repository, error) { + if err := rc.check(); err != nil { + return nil, err + } _node, _spec := rc.createSpec() if err := sqlgraph.CreateNode(ctx, rc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -223,54 +190,34 @@ func (rc *RepositoryCreate) sqlSave(ctx context.Context) (*Repository, error) { return nil, err } } + rc.mutation.id = &_node.ID + rc.mutation.done = true return _node, nil } func (rc *RepositoryCreate) createSpec() (*Repository, *sqlgraph.CreateSpec) { var ( _node = &Repository{config: rc.config} - _spec = &sqlgraph.CreateSpec{ - Table: repository.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(repository.Table, sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID)) ) if id, ok := rc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := rc.mutation.RepoURL(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldRepoURL, - }) + _spec.SetField(repository.FieldRepoURL, field.TypeString, value) _node.RepoURL = value } if value, ok := rc.mutation.BranchName(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldBranchName, - }) + _spec.SetField(repository.FieldBranchName, field.TypeString, value) _node.BranchName = value } if value, ok := rc.mutation.EnviromentFilepath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldEnviromentFilepath, - }) + _spec.SetField(repository.FieldEnviromentFilepath, field.TypeString, value) _node.EnviromentFilepath = value } if value, ok := rc.mutation.FolderPath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldFolderPath, - }) + _spec.SetField(repository.FieldFolderPath, field.TypeString, value) _node.FolderPath = value } if nodes := rc.mutation.RepositoryToEnvironmentIDs(); len(nodes) > 0 { @@ -281,10 +228,7 @@ func (rc *RepositoryCreate) createSpec() (*Repository, *sqlgraph.CreateSpec) { 
Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -300,10 +244,7 @@ func (rc *RepositoryCreate) createSpec() (*Repository, *sqlgraph.CreateSpec) { Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -317,11 +258,15 @@ func (rc *RepositoryCreate) createSpec() (*Repository, *sqlgraph.CreateSpec) { // RepositoryCreateBulk is the builder for creating many Repository entities in bulk. type RepositoryCreateBulk struct { config + err error builders []*RepositoryCreate } // Save creates the Repository entities in the database. func (rcb *RepositoryCreateBulk) Save(ctx context.Context) ([]*Repository, error) { + if rcb.err != nil { + return nil, rcb.err + } specs := make([]*sqlgraph.CreateSpec, len(rcb.builders)) nodes := make([]*Repository, len(rcb.builders)) mutators := make([]Mutator, len(rcb.builders)) @@ -338,8 +283,8 @@ func (rcb *RepositoryCreateBulk) Save(ctx context.Context) ([]*Repository, error return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, rcb.builders[i+1].mutation) } else { @@ -347,7 +292,7 @@ func (rcb *RepositoryCreateBulk) Save(ctx context.Context) ([]*Repository, error // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, rcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/repository_delete.go b/ent/repository_delete.go index 7068850c..7d272d37 100755 --- a/ent/repository_delete.go +++ b/ent/repository_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (rd *RepositoryDelete) Where(ps ...predicate.Repository) *RepositoryDelete // Exec executes the deletion query and returns how many vertices were deleted. func (rd *RepositoryDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(rd.hooks) == 0 { - affected, err = rd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepositoryMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - rd.mutation = mutation - affected, err = rd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(rd.hooks) - 1; i >= 0; i-- { - if rd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = rd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, rd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, rd.sqlExec, rd.mutation, rd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
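// NOTE (editorial sketch, not part of the generated diff): a minimal example of how
// calling code exercises the regenerated builders above. It assumes a laforge
// *ent.Client has already been opened elsewhere; the function name and the literal
// path/filename values are illustrative only. The predicate helpers
// (FolderPathHasPrefix, EnviromentFilepathEqualFold, Or) and the Delete builder's
// Where/Exec are the ones rewritten in this diff to the new sql.Field* and
// withHooks forms.
package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/repository"
)

// pruneScratchRepositories deletes Repository rows whose folder path or
// environment filepath matches the composed predicate and returns the number
// of rows removed.
func pruneScratchRepositories(ctx context.Context, client *ent.Client) (int, error) {
	return client.Repository.Delete().
		Where(repository.Or(
			repository.FolderPathHasPrefix("/tmp/"),
			repository.EnviromentFilepathEqualFold("env.laforge"),
		)).
		Exec(ctx)
}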
@@ -68,15 +40,7 @@ func (rd *RepositoryDelete) ExecX(ctx context.Context) int { } func (rd *RepositoryDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: repository.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(repository.Table, sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID)) if ps := rd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (rd *RepositoryDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, rd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, rd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + rd.mutation.done = true + return affected, err } // RepositoryDeleteOne is the builder for deleting a single Repository entity. @@ -92,6 +61,12 @@ type RepositoryDeleteOne struct { rd *RepositoryDelete } +// Where appends a list predicates to the RepositoryDelete builder. +func (rdo *RepositoryDeleteOne) Where(ps ...predicate.Repository) *RepositoryDeleteOne { + rdo.rd.mutation.Where(ps...) + return rdo +} + // Exec executes the deletion query. func (rdo *RepositoryDeleteOne) Exec(ctx context.Context) error { n, err := rdo.rd.Exec(ctx) @@ -107,5 +82,7 @@ func (rdo *RepositoryDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (rdo *RepositoryDeleteOne) ExecX(ctx context.Context) { - rdo.rd.ExecX(ctx) + if err := rdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/repository_query.go b/ent/repository_query.go index c8edbfe6..9c5ecc49 100755 --- a/ent/repository_query.go +++ b/ent/repository_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,15 +21,16 @@ import ( // RepositoryQuery is the builder for querying Repository entities. type RepositoryQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Repository - // eager-loading edges. - withRepositoryToEnvironment *EnvironmentQuery - withRepositoryToRepoCommit *RepoCommitQuery + ctx *QueryContext + order []repository.OrderOption + inters []Interceptor + predicates []predicate.Repository + withRepositoryToEnvironment *EnvironmentQuery + withRepositoryToRepoCommit *RepoCommitQuery + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Repository) error + withNamedRepositoryToEnvironment map[string]*EnvironmentQuery + withNamedRepositoryToRepoCommit map[string]*RepoCommitQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -42,34 +42,34 @@ func (rq *RepositoryQuery) Where(ps ...predicate.Repository) *RepositoryQuery { return rq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (rq *RepositoryQuery) Limit(limit int) *RepositoryQuery { - rq.limit = &limit + rq.ctx.Limit = &limit return rq } -// Offset adds an offset step to the query. +// Offset to start from. 
func (rq *RepositoryQuery) Offset(offset int) *RepositoryQuery { - rq.offset = &offset + rq.ctx.Offset = &offset return rq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (rq *RepositoryQuery) Unique(unique bool) *RepositoryQuery { - rq.unique = &unique + rq.ctx.Unique = &unique return rq } -// Order adds an order step to the query. -func (rq *RepositoryQuery) Order(o ...OrderFunc) *RepositoryQuery { +// Order specifies how the records should be ordered. +func (rq *RepositoryQuery) Order(o ...repository.OrderOption) *RepositoryQuery { rq.order = append(rq.order, o...) return rq } // QueryRepositoryToEnvironment chains the current query on the "RepositoryToEnvironment" edge. func (rq *RepositoryQuery) QueryRepositoryToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: rq.config} + query := (&EnvironmentClient{config: rq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := rq.prepareQuery(ctx); err != nil { return nil, err @@ -91,7 +91,7 @@ func (rq *RepositoryQuery) QueryRepositoryToEnvironment() *EnvironmentQuery { // QueryRepositoryToRepoCommit chains the current query on the "RepositoryToRepoCommit" edge. func (rq *RepositoryQuery) QueryRepositoryToRepoCommit() *RepoCommitQuery { - query := &RepoCommitQuery{config: rq.config} + query := (&RepoCommitClient{config: rq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := rq.prepareQuery(ctx); err != nil { return nil, err @@ -114,7 +114,7 @@ func (rq *RepositoryQuery) QueryRepositoryToRepoCommit() *RepoCommitQuery { // First returns the first Repository entity from the query. // Returns a *NotFoundError when no Repository was found. func (rq *RepositoryQuery) First(ctx context.Context) (*Repository, error) { - nodes, err := rq.Limit(1).All(ctx) + nodes, err := rq.Limit(1).All(setContextOp(ctx, rq.ctx, "First")) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func (rq *RepositoryQuery) FirstX(ctx context.Context) *Repository { // Returns a *NotFoundError when no Repository ID was found. func (rq *RepositoryQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = rq.Limit(1).IDs(ctx); err != nil { + if ids, err = rq.Limit(1).IDs(setContextOp(ctx, rq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -160,7 +160,7 @@ func (rq *RepositoryQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Repository entity is found. // Returns a *NotFoundError when no Repository entities are found. func (rq *RepositoryQuery) Only(ctx context.Context) (*Repository, error) { - nodes, err := rq.Limit(2).All(ctx) + nodes, err := rq.Limit(2).All(setContextOp(ctx, rq.ctx, "Only")) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func (rq *RepositoryQuery) OnlyX(ctx context.Context) *Repository { // Returns a *NotFoundError when no entities are found. func (rq *RepositoryQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = rq.Limit(2).IDs(ctx); err != nil { + if ids, err = rq.Limit(2).IDs(setContextOp(ctx, rq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -213,10 +213,12 @@ func (rq *RepositoryQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Repositories. 
func (rq *RepositoryQuery) All(ctx context.Context) ([]*Repository, error) { + ctx = setContextOp(ctx, rq.ctx, "All") if err := rq.prepareQuery(ctx); err != nil { return nil, err } - return rq.sqlAll(ctx) + qr := querierAll[[]*Repository, *RepositoryQuery]() + return withInterceptors[[]*Repository](ctx, rq, qr, rq.inters) } // AllX is like All, but panics if an error occurs. @@ -229,9 +231,12 @@ func (rq *RepositoryQuery) AllX(ctx context.Context) []*Repository { } // IDs executes the query and returns a list of Repository IDs. -func (rq *RepositoryQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := rq.Select(repository.FieldID).Scan(ctx, &ids); err != nil { +func (rq *RepositoryQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if rq.ctx.Unique == nil && rq.path != nil { + rq.Unique(true) + } + ctx = setContextOp(ctx, rq.ctx, "IDs") + if err = rq.Select(repository.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -248,10 +253,11 @@ func (rq *RepositoryQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (rq *RepositoryQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, rq.ctx, "Count") if err := rq.prepareQuery(ctx); err != nil { return 0, err } - return rq.sqlCount(ctx) + return withInterceptors[int](ctx, rq, querierCount[*RepositoryQuery](), rq.inters) } // CountX is like Count, but panics if an error occurs. @@ -265,10 +271,15 @@ func (rq *RepositoryQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (rq *RepositoryQuery) Exist(ctx context.Context) (bool, error) { - if err := rq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, rq.ctx, "Exist") + switch _, err := rq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return rq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -288,23 +299,22 @@ func (rq *RepositoryQuery) Clone() *RepositoryQuery { } return &RepositoryQuery{ config: rq.config, - limit: rq.limit, - offset: rq.offset, - order: append([]OrderFunc{}, rq.order...), + ctx: rq.ctx.Clone(), + order: append([]repository.OrderOption{}, rq.order...), + inters: append([]Interceptor{}, rq.inters...), predicates: append([]predicate.Repository{}, rq.predicates...), withRepositoryToEnvironment: rq.withRepositoryToEnvironment.Clone(), withRepositoryToRepoCommit: rq.withRepositoryToRepoCommit.Clone(), // clone intermediate query. - sql: rq.sql.Clone(), - path: rq.path, - unique: rq.unique, + sql: rq.sql.Clone(), + path: rq.path, } } // WithRepositoryToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "RepositoryToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (rq *RepositoryQuery) WithRepositoryToEnvironment(opts ...func(*EnvironmentQuery)) *RepositoryQuery { - query := &EnvironmentQuery{config: rq.config} + query := (&EnvironmentClient{config: rq.config}).Query() for _, opt := range opts { opt(query) } @@ -315,7 +325,7 @@ func (rq *RepositoryQuery) WithRepositoryToEnvironment(opts ...func(*Environment // WithRepositoryToRepoCommit tells the query-builder to eager-load the nodes that are connected to // the "RepositoryToRepoCommit" edge. The optional arguments are used to configure the query builder of the edge. 
func (rq *RepositoryQuery) WithRepositoryToRepoCommit(opts ...func(*RepoCommitQuery)) *RepositoryQuery { - query := &RepoCommitQuery{config: rq.config} + query := (&RepoCommitClient{config: rq.config}).Query() for _, opt := range opts { opt(query) } @@ -337,17 +347,13 @@ func (rq *RepositoryQuery) WithRepositoryToRepoCommit(opts ...func(*RepoCommitQu // GroupBy(repository.FieldRepoURL). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (rq *RepositoryQuery) GroupBy(field string, fields ...string) *RepositoryGroupBy { - group := &RepositoryGroupBy{config: rq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := rq.prepareQuery(ctx); err != nil { - return nil, err - } - return rq.sqlQuery(ctx), nil - } - return group + rq.ctx.Fields = append([]string{field}, fields...) + grbuild := &RepositoryGroupBy{build: rq} + grbuild.flds = &rq.ctx.Fields + grbuild.label = repository.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -362,14 +368,31 @@ func (rq *RepositoryQuery) GroupBy(field string, fields ...string) *RepositoryGr // client.Repository.Query(). // Select(repository.FieldRepoURL). // Scan(ctx, &v) -// func (rq *RepositoryQuery) Select(fields ...string) *RepositorySelect { - rq.fields = append(rq.fields, fields...) - return &RepositorySelect{RepositoryQuery: rq} + rq.ctx.Fields = append(rq.ctx.Fields, fields...) + sbuild := &RepositorySelect{RepositoryQuery: rq} + sbuild.label = repository.Label + sbuild.flds, sbuild.scan = &rq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a RepositorySelect configured with the given aggregations. +func (rq *RepositoryQuery) Aggregate(fns ...AggregateFunc) *RepositorySelect { + return rq.Select().Aggregate(fns...) 
} func (rq *RepositoryQuery) prepareQuery(ctx context.Context) error { - for _, f := range rq.fields { + for _, inter := range rq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, rq); err != nil { + return err + } + } + } + for _, f := range rq.ctx.Fields { if !repository.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -384,7 +407,7 @@ func (rq *RepositoryQuery) prepareQuery(ctx context.Context) error { return nil } -func (rq *RepositoryQuery) sqlAll(ctx context.Context) ([]*Repository, error) { +func (rq *RepositoryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Repository, error) { var ( nodes = []*Repository{} _spec = rq.querySpec() @@ -393,157 +416,181 @@ func (rq *RepositoryQuery) sqlAll(ctx context.Context) ([]*Repository, error) { rq.withRepositoryToRepoCommit != nil, } ) - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Repository).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Repository{config: rq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(rq.modifiers) > 0 { + _spec.Modifiers = rq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, rq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := rq.withRepositoryToEnvironment; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*Repository, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.RepositoryToEnvironment = []*Environment{} + if err := rq.loadRepositoryToEnvironment(ctx, query, nodes, + func(n *Repository) { n.Edges.RepositoryToEnvironment = []*Environment{} }, + func(n *Repository, e *Environment) { + n.Edges.RepositoryToEnvironment = append(n.Edges.RepositoryToEnvironment, e) + }); err != nil { + return nil, err } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*Repository) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: false, - Table: repository.RepositoryToEnvironmentTable, - Columns: repository.RepositoryToEnvironmentPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(repository.RepositoryToEnvironmentPrimaryKey[0], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + if query := 
rq.withRepositoryToRepoCommit; query != nil { + if err := rq.loadRepositoryToRepoCommit(ctx, query, nodes, + func(n *Repository) { n.Edges.RepositoryToRepoCommit = []*RepoCommit{} }, + func(n *Repository, e *RepoCommit) { + n.Edges.RepositoryToRepoCommit = append(n.Edges.RepositoryToRepoCommit, e) + }); err != nil { + return nil, err } - if err := sqlgraph.QueryEdges(ctx, rq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "RepositoryToEnvironment": %w`, err) + } + for name, query := range rq.withNamedRepositoryToEnvironment { + if err := rq.loadRepositoryToEnvironment(ctx, query, nodes, + func(n *Repository) { n.appendNamedRepositoryToEnvironment(name) }, + func(n *Repository, e *Environment) { n.appendNamedRepositoryToEnvironment(name, e) }); err != nil { + return nil, err } - query.Where(environment.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range rq.withNamedRepositoryToRepoCommit { + if err := rq.loadRepositoryToRepoCommit(ctx, query, nodes, + func(n *Repository) { n.appendNamedRepositoryToRepoCommit(name) }, + func(n *Repository, e *RepoCommit) { n.appendNamedRepositoryToRepoCommit(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "RepositoryToEnvironment" node returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.RepositoryToEnvironment = append(nodes[i].Edges.RepositoryToEnvironment, n) - } + } + for i := range rq.loadTotal { + if err := rq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := rq.withRepositoryToRepoCommit; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Repository) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.RepositoryToRepoCommit = []*RepoCommit{} - } - query.withFKs = true - query.Where(predicate.RepoCommit(func(s *sql.Selector) { - s.Where(sql.InValues(repository.RepositoryToRepoCommitColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err +func (rq *RepositoryQuery) loadRepositoryToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Repository, init func(*Repository), assign func(*Repository, *Environment)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Repository) + nids := make(map[uuid.UUID]map[*Repository]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) } - for _, n := range neighbors { - fk := n.repository_repository_to_repo_commit - if fk == nil { - return nil, fmt.Errorf(`foreign-key "repository_repository_to_repo_commit" is nil for node %v`, n.ID) + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(repository.RepositoryToEnvironmentTable) + s.Join(joinT).On(s.C(environment.FieldID), joinT.C(repository.RepositoryToEnvironmentPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(repository.RepositoryToEnvironmentPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(repository.RepositoryToEnvironmentPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "repository_repository_to_repo_commit" returned %v for node %v`, *fk, n.ID) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Repository]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } - node.Edges.RepositoryToRepoCommit = append(node.Edges.RepositoryToRepoCommit, n) + }) + }) + neighbors, err := withInterceptors[[]*Environment](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "RepositoryToEnvironment" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - return nodes, nil + return nil +} +func (rq *RepositoryQuery) loadRepositoryToRepoCommit(ctx context.Context, query *RepoCommitQuery, nodes []*Repository, init func(*Repository), assign func(*Repository, *RepoCommit)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Repository) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.RepoCommit(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(repository.RepositoryToRepoCommitColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.repository_repository_to_repo_commit + if fk == nil { + return fmt.Errorf(`foreign-key "repository_repository_to_repo_commit" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "repository_repository_to_repo_commit" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil } func (rq *RepositoryQuery) sqlCount(ctx context.Context) (int, error) { _spec := rq.querySpec() - _spec.Node.Columns = rq.fields - if len(rq.fields) > 0 { - _spec.Unique = rq.unique != nil && *rq.unique + if len(rq.modifiers) > 0 { + _spec.Modifiers = rq.modifiers } - return sqlgraph.CountNodes(ctx, rq.driver, _spec) -} - -func (rq *RepositoryQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := rq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = rq.ctx.Fields + if len(rq.ctx.Fields) > 0 { + _spec.Unique = rq.ctx.Unique != nil && *rq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, rq.driver, _spec) } func (rq *RepositoryQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: repository.Table, - Columns: repository.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, - }, - From: rq.sql, - Unique: true, - } - if unique := rq.unique; unique != nil { + 
_spec := sqlgraph.NewQuerySpec(repository.Table, repository.Columns, sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID)) + _spec.From = rq.sql + if unique := rq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if rq.path != nil { + _spec.Unique = true } - if fields := rq.fields; len(fields) > 0 { + if fields := rq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, repository.FieldID) for i := range fields { @@ -559,10 +606,10 @@ func (rq *RepositoryQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := rq.limit; limit != nil { + if limit := rq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := rq.offset; offset != nil { + if offset := rq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := rq.order; len(ps) > 0 { @@ -578,7 +625,7 @@ func (rq *RepositoryQuery) querySpec() *sqlgraph.QuerySpec { func (rq *RepositoryQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(rq.driver.Dialect()) t1 := builder.Table(repository.Table) - columns := rq.fields + columns := rq.ctx.Fields if len(columns) == 0 { columns = repository.Columns } @@ -587,7 +634,7 @@ func (rq *RepositoryQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = rq.sql selector.Select(selector.Columns(columns...)...) } - if rq.unique != nil && *rq.unique { + if rq.ctx.Unique != nil && *rq.ctx.Unique { selector.Distinct() } for _, p := range rq.predicates { @@ -596,498 +643,128 @@ func (rq *RepositoryQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range rq.order { p(selector) } - if offset := rq.offset; offset != nil { + if offset := rq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := rq.limit; limit != nil { + if limit := rq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// RepositoryGroupBy is the group-by builder for Repository entities. -type RepositoryGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (rgb *RepositoryGroupBy) Aggregate(fns ...AggregateFunc) *RepositoryGroupBy { - rgb.fns = append(rgb.fns, fns...) - return rgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (rgb *RepositoryGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := rgb.path(ctx) - if err != nil { - return err - } - rgb.sql = query - return rgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (rgb *RepositoryGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := rgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(rgb.fields) > 1 { - return nil, errors.New("ent: RepositoryGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := rgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (rgb *RepositoryGroupBy) StringsX(ctx context.Context) []string { - v, err := rgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = rgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositoryGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (rgb *RepositoryGroupBy) StringX(ctx context.Context) string { - v, err := rgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(rgb.fields) > 1 { - return nil, errors.New("ent: RepositoryGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := rgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (rgb *RepositoryGroupBy) IntsX(ctx context.Context) []int { - v, err := rgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = rgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositoryGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (rgb *RepositoryGroupBy) IntX(ctx context.Context) int { - v, err := rgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(rgb.fields) > 1 { - return nil, errors.New("ent: RepositoryGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := rgb.Scan(ctx, &v); err != nil { - return nil, err +// WithNamedRepositoryToEnvironment tells the query-builder to eager-load the nodes that are connected to the "RepositoryToEnvironment" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (rq *RepositoryQuery) WithNamedRepositoryToEnvironment(name string, opts ...func(*EnvironmentQuery)) *RepositoryQuery { + query := (&EnvironmentClient{config: rq.config}).Query() + for _, opt := range opts { + opt(query) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. 
-func (rgb *RepositoryGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := rgb.Float64s(ctx) - if err != nil { - panic(err) + if rq.withNamedRepositoryToEnvironment == nil { + rq.withNamedRepositoryToEnvironment = make(map[string]*EnvironmentQuery) } - return v + rq.withNamedRepositoryToEnvironment[name] = query + return rq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = rgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositoryGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedRepositoryToRepoCommit tells the query-builder to eager-load the nodes that are connected to the "RepositoryToRepoCommit" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (rq *RepositoryQuery) WithNamedRepositoryToRepoCommit(name string, opts ...func(*RepoCommitQuery)) *RepositoryQuery { + query := (&RepoCommitClient{config: rq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (rgb *RepositoryGroupBy) Float64X(ctx context.Context) float64 { - v, err := rgb.Float64(ctx) - if err != nil { - panic(err) + if rq.withNamedRepositoryToRepoCommit == nil { + rq.withNamedRepositoryToRepoCommit = make(map[string]*RepoCommitQuery) } - return v + rq.withNamedRepositoryToRepoCommit[name] = query + return rq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(rgb.fields) > 1 { - return nil, errors.New("ent: RepositoryGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := rgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// RepositoryGroupBy is the group-by builder for Repository entities. +type RepositoryGroupBy struct { + selector + build *RepositoryQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (rgb *RepositoryGroupBy) BoolsX(ctx context.Context) []bool { - v, err := rgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (rgb *RepositoryGroupBy) Aggregate(fns ...AggregateFunc) *RepositoryGroupBy { + rgb.fns = append(rgb.fns, fns...) + return rgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (rgb *RepositoryGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = rgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositoryGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. 
+func (rgb *RepositoryGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rgb.build.ctx, "GroupBy") + if err := rgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*RepositoryQuery, *RepositoryGroupBy](ctx, rgb.build, rgb, rgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (rgb *RepositoryGroupBy) BoolX(ctx context.Context) bool { - v, err := rgb.Bool(ctx) - if err != nil { - panic(err) +func (rgb *RepositoryGroupBy) sqlScan(ctx context.Context, root *RepositoryQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(rgb.fns)) + for _, fn := range rgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (rgb *RepositoryGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range rgb.fields { - if !repository.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*rgb.flds)+len(rgb.fns)) + for _, f := range *rgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := rgb.sqlQuery() + selector.GroupBy(selector.Columns(*rgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := rgb.driver.Query(ctx, query, args, rows); err != nil { + if err := rgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (rgb *RepositoryGroupBy) sqlQuery() *sql.Selector { - selector := rgb.sql.Select() - aggregation := make([]string, 0, len(rgb.fns)) - for _, fn := range rgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(rgb.fields)+len(rgb.fns)) - for _, f := range rgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(rgb.fields...)...) -} - // RepositorySelect is the builder for selecting fields of Repository entities. type RepositorySelect struct { *RepositoryQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (rs *RepositorySelect) Aggregate(fns ...AggregateFunc) *RepositorySelect { + rs.fns = append(rs.fns, fns...) + return rs } // Scan applies the selector query and scans the result into the given value. -func (rs *RepositorySelect) Scan(ctx context.Context, v interface{}) error { +func (rs *RepositorySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rs.ctx, "Select") if err := rs.prepareQuery(ctx); err != nil { return err } - rs.sql = rs.RepositoryQuery.sqlQuery(ctx) - return rs.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (rs *RepositorySelect) ScanX(ctx context.Context, v interface{}) { - if err := rs.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. 
-func (rs *RepositorySelect) Strings(ctx context.Context) ([]string, error) { - if len(rs.fields) > 1 { - return nil, errors.New("ent: RepositorySelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := rs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (rs *RepositorySelect) StringsX(ctx context.Context) []string { - v, err := rs.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (rs *RepositorySelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = rs.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositorySelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (rs *RepositorySelect) StringX(ctx context.Context) string { - v, err := rs.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (rs *RepositorySelect) Ints(ctx context.Context) ([]int, error) { - if len(rs.fields) > 1 { - return nil, errors.New("ent: RepositorySelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := rs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (rs *RepositorySelect) IntsX(ctx context.Context) []int { - v, err := rs.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (rs *RepositorySelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = rs.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositorySelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (rs *RepositorySelect) IntX(ctx context.Context) int { - v, err := rs.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (rs *RepositorySelect) Float64s(ctx context.Context) ([]float64, error) { - if len(rs.fields) > 1 { - return nil, errors.New("ent: RepositorySelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := rs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (rs *RepositorySelect) Float64sX(ctx context.Context) []float64 { - v, err := rs.Float64s(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*RepositoryQuery, *RepositorySelect](ctx, rs.RepositoryQuery, rs, rs.inters, v) } -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (rs *RepositorySelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = rs.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositorySelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (rs *RepositorySelect) Float64X(ctx context.Context) float64 { - v, err := rs.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (rs *RepositorySelect) Bools(ctx context.Context) ([]bool, error) { - if len(rs.fields) > 1 { - return nil, errors.New("ent: RepositorySelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := rs.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (rs *RepositorySelect) BoolsX(ctx context.Context) []bool { - v, err := rs.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (rs *RepositorySelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = rs.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{repository.Label} - default: - err = fmt.Errorf("ent: RepositorySelect.Bools returned %d results when one was expected", len(v)) +func (rs *RepositorySelect) sqlScan(ctx context.Context, root *RepositoryQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(rs.fns)) + for _, fn := range rs.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (rs *RepositorySelect) BoolX(ctx context.Context) bool { - v, err := rs.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*rs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (rs *RepositorySelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := rs.sql.Query() + query, args := selector.Query() if err := rs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/repository_update.go b/ent/repository_update.go index 4741cbc8..e38b2f3c 100755 --- a/ent/repository_update.go +++ b/ent/repository_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -36,6 +36,14 @@ func (ru *RepositoryUpdate) SetRepoURL(s string) *RepositoryUpdate { return ru } +// SetNillableRepoURL sets the "repo_url" field if the given value is not nil. +func (ru *RepositoryUpdate) SetNillableRepoURL(s *string) *RepositoryUpdate { + if s != nil { + ru.SetRepoURL(*s) + } + return ru +} + // SetBranchName sets the "branch_name" field. func (ru *RepositoryUpdate) SetBranchName(s string) *RepositoryUpdate { ru.mutation.SetBranchName(s) @@ -56,6 +64,14 @@ func (ru *RepositoryUpdate) SetEnviromentFilepath(s string) *RepositoryUpdate { return ru } +// SetNillableEnviromentFilepath sets the "enviroment_filepath" field if the given value is not nil. 
+func (ru *RepositoryUpdate) SetNillableEnviromentFilepath(s *string) *RepositoryUpdate { + if s != nil { + ru.SetEnviromentFilepath(*s) + } + return ru +} + // SetFolderPath sets the "folder_path" field. func (ru *RepositoryUpdate) SetFolderPath(s string) *RepositoryUpdate { ru.mutation.SetFolderPath(s) @@ -149,34 +165,7 @@ func (ru *RepositoryUpdate) RemoveRepositoryToRepoCommit(r ...*RepoCommit) *Repo // Save executes the query and returns the number of nodes affected by the update operation. func (ru *RepositoryUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ru.hooks) == 0 { - affected, err = ru.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepositoryMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ru.mutation = mutation - affected, err = ru.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(ru.hooks) - 1; i >= 0; i-- { - if ru.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ru.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ru.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ru.sqlSave, ru.mutation, ru.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -202,16 +191,7 @@ func (ru *RepositoryUpdate) ExecX(ctx context.Context) { } func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: repository.Table, - Columns: repository.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(repository.Table, repository.Columns, sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID)) if ps := ru.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -220,32 +200,16 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := ru.mutation.RepoURL(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldRepoURL, - }) + _spec.SetField(repository.FieldRepoURL, field.TypeString, value) } if value, ok := ru.mutation.BranchName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldBranchName, - }) + _spec.SetField(repository.FieldBranchName, field.TypeString, value) } if value, ok := ru.mutation.EnviromentFilepath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldEnviromentFilepath, - }) + _spec.SetField(repository.FieldEnviromentFilepath, field.TypeString, value) } if value, ok := ru.mutation.FolderPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldFolderPath, - }) + _spec.SetField(repository.FieldFolderPath, field.TypeString, value) } if ru.mutation.RepositoryToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -255,10 +219,7 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - 
Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -271,10 +232,7 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -290,10 +248,7 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -309,10 +264,7 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -325,10 +277,7 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -344,10 +293,7 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -359,10 +305,11 @@ func (ru *RepositoryUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{repository.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + ru.mutation.done = true return n, nil } @@ -380,6 +327,14 @@ func (ruo *RepositoryUpdateOne) SetRepoURL(s string) *RepositoryUpdateOne { return ruo } +// SetNillableRepoURL sets the "repo_url" field if the given value is not nil. +func (ruo *RepositoryUpdateOne) SetNillableRepoURL(s *string) *RepositoryUpdateOne { + if s != nil { + ruo.SetRepoURL(*s) + } + return ruo +} + // SetBranchName sets the "branch_name" field. func (ruo *RepositoryUpdateOne) SetBranchName(s string) *RepositoryUpdateOne { ruo.mutation.SetBranchName(s) @@ -400,6 +355,14 @@ func (ruo *RepositoryUpdateOne) SetEnviromentFilepath(s string) *RepositoryUpdat return ruo } +// SetNillableEnviromentFilepath sets the "enviroment_filepath" field if the given value is not nil. +func (ruo *RepositoryUpdateOne) SetNillableEnviromentFilepath(s *string) *RepositoryUpdateOne { + if s != nil { + ruo.SetEnviromentFilepath(*s) + } + return ruo +} + // SetFolderPath sets the "folder_path" field. 
func (ruo *RepositoryUpdateOne) SetFolderPath(s string) *RepositoryUpdateOne { ruo.mutation.SetFolderPath(s) @@ -491,6 +454,12 @@ func (ruo *RepositoryUpdateOne) RemoveRepositoryToRepoCommit(r ...*RepoCommit) * return ruo.RemoveRepositoryToRepoCommitIDs(ids...) } +// Where appends a list predicates to the RepositoryUpdate builder. +func (ruo *RepositoryUpdateOne) Where(ps ...predicate.Repository) *RepositoryUpdateOne { + ruo.mutation.Where(ps...) + return ruo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (ruo *RepositoryUpdateOne) Select(field string, fields ...string) *RepositoryUpdateOne { @@ -500,34 +469,7 @@ func (ruo *RepositoryUpdateOne) Select(field string, fields ...string) *Reposito // Save executes the query and returns the updated Repository entity. func (ruo *RepositoryUpdateOne) Save(ctx context.Context) (*Repository, error) { - var ( - err error - node *Repository - ) - if len(ruo.hooks) == 0 { - node, err = ruo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*RepositoryMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ruo.mutation = mutation - node, err = ruo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(ruo.hooks) - 1; i >= 0; i-- { - if ruo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ruo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ruo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, ruo.sqlSave, ruo.mutation, ruo.hooks) } // SaveX is like Save, but panics if an error occurs. 
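Aside (editor's sketch, not part of the diff): the repository_update.go hunks above replace the hand-rolled hook loop in Save with a single withHooks call and add Where(...) plus SetNillable* helpers to the RepositoryUpdateOne builder. Below is a minimal, hypothetical usage sketch of those helpers; the package name, function name, client variable, and the "main" predicate value are assumptions for illustration only, not code from this repository.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/repository"
	"github.com/google/uuid"
)

// updateRepoURL updates a single Repository's repo_url, but only when the
// caller actually supplied a value (a nil pointer leaves the column untouched).
func updateRepoURL(ctx context.Context, client *ent.Client, id uuid.UUID, url *string) (*ent.Repository, error) {
	return client.Repository.
		UpdateOneID(id).
		Where(repository.BranchNameNEQ("main")). // Where(...) is newly available on the UpdateOne builder
		SetNillableRepoURL(url).                 // new nillable setter: skipped entirely when url is nil
		Save(ctx)                                // Save now delegates to withHooks, per the hunk above
}

The nil-pointer behavior follows directly from the generated guard shown above (if s != nil { ... SetRepoURL(*s) }), which makes optional update inputs easy to pass through without extra branching at the call site.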
@@ -553,16 +495,7 @@ func (ruo *RepositoryUpdateOne) ExecX(ctx context.Context) { } func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: repository.Table, - Columns: repository.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repository.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(repository.Table, repository.Columns, sqlgraph.NewFieldSpec(repository.FieldID, field.TypeUUID)) id, ok := ruo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Repository.id" for update`)} @@ -588,32 +521,16 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, } } if value, ok := ruo.mutation.RepoURL(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldRepoURL, - }) + _spec.SetField(repository.FieldRepoURL, field.TypeString, value) } if value, ok := ruo.mutation.BranchName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldBranchName, - }) + _spec.SetField(repository.FieldBranchName, field.TypeString, value) } if value, ok := ruo.mutation.EnviromentFilepath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldEnviromentFilepath, - }) + _spec.SetField(repository.FieldEnviromentFilepath, field.TypeString, value) } if value, ok := ruo.mutation.FolderPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: repository.FieldFolderPath, - }) + _spec.SetField(repository.FieldFolderPath, field.TypeString, value) } if ruo.mutation.RepositoryToEnvironmentCleared() { edge := &sqlgraph.EdgeSpec{ @@ -623,10 +540,7 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -639,10 +553,7 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -658,10 +569,7 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, Columns: repository.RepositoryToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -677,10 +585,7 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) @@ -693,10 +598,7 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -712,10 +614,7 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, Columns: []string{repository.RepositoryToRepoCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: repocommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(repocommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -730,9 +629,10 @@ func (ruo *RepositoryUpdateOne) sqlSave(ctx context.Context) (_node *Repository, if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{repository.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + ruo.mutation.done = true return _node, nil } diff --git a/ent/runtime.go b/ent/runtime.go index 51860ec4..90a52037 100755 --- a/ent/runtime.go +++ b/ent/runtime.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent diff --git a/ent/runtime/runtime.go b/ent/runtime/runtime.go index 5efcaee7..33074a52 100755 --- a/ent/runtime/runtime.go +++ b/ent/runtime/runtime.go @@ -1,10 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package runtime // The schema-stitching logic is generated in github.com/gen0cide/laforge/ent/runtime.go const ( - Version = "v0.10.1" // Version of ent codegen. - Sum = "h1:dM5h4Zk6yHGIgw4dCqVzGw3nWgpGYJiV4/kyHEF6PFo=" // Sum of ent codegen. + Version = "v0.12.5" // Version of ent codegen. + Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen. ) diff --git a/ent/script.go b/ent/script.go index b5f712c5..c4d95696 100755 --- a/ent/script.go +++ b/ent/script.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/script" @@ -18,8 +19,8 @@ type Script struct { config ` json:"-"` // ID of the ent. ID uuid.UUID `json:"id,omitempty"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Name holds the value of the "name" field. Name string `json:"name,omitempty" hcl:"name,attr"` // Language holds the value of the "language" field. @@ -50,6 +51,7 @@ type Script struct { // The values are being populated by the ScriptQuery when eager-loading is set. Edges ScriptEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // ScriptToUser holds the value of the ScriptToUser edge. HCLScriptToUser []*User `json:"ScriptToUser,omitempty" hcl:"maintainer,block"` @@ -57,8 +59,9 @@ type Script struct { HCLScriptToFinding []*Finding `json:"ScriptToFinding,omitempty" hcl:"finding,block"` // ScriptToEnvironment holds the value of the ScriptToEnvironment edge. 
HCLScriptToEnvironment *Environment `json:"ScriptToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ environment_environment_to_script *uuid.UUID + selectValues sql.SelectValues } // ScriptEdges holds the relations/edges for other nodes in the graph. @@ -72,6 +75,11 @@ type ScriptEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [3]bool + // totalCount holds the count of the edges above. + totalCount [3]map[string]int + + namedScriptToUser map[string][]*User + namedScriptToFinding map[string][]*Finding } // ScriptToUserOrErr returns the ScriptToUser value or an error if the edge @@ -97,8 +105,7 @@ func (e ScriptEdges) ScriptToFindingOrErr() ([]*Finding, error) { func (e ScriptEdges) ScriptToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[2] { if e.ScriptToEnvironment == nil { - // The edge ScriptToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.ScriptToEnvironment, nil @@ -107,8 +114,8 @@ func (e ScriptEdges) ScriptToEnvironmentOrErr() (*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Script) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Script) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case script.FieldArgs, script.FieldVars, script.FieldTags: @@ -117,14 +124,14 @@ func (*Script) scanValues(columns []string) ([]interface{}, error) { values[i] = new(sql.NullBool) case script.FieldCooldown, script.FieldTimeout: values[i] = new(sql.NullInt64) - case script.FieldHclID, script.FieldName, script.FieldLanguage, script.FieldDescription, script.FieldSource, script.FieldSourceType, script.FieldAbsPath: + case script.FieldHCLID, script.FieldName, script.FieldLanguage, script.FieldDescription, script.FieldSource, script.FieldSourceType, script.FieldAbsPath: values[i] = new(sql.NullString) case script.FieldID: values[i] = new(uuid.UUID) case script.ForeignKeys[0]: // environment_environment_to_script values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Script", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -132,7 +139,7 @@ func (*Script) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Script fields. 
-func (s *Script) assignValues(columns []string, values []interface{}) error { +func (s *Script) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -144,11 +151,11 @@ func (s *Script) assignValues(columns []string, values []interface{}) error { } else if value != nil { s.ID = *value } - case script.FieldHclID: + case script.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - s.HclID = value.String + s.HCLID = value.String } case script.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -241,41 +248,49 @@ func (s *Script) assignValues(columns []string, values []interface{}) error { s.environment_environment_to_script = new(uuid.UUID) *s.environment_environment_to_script = *value.S.(*uuid.UUID) } + default: + s.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Script. +// This includes values selected through modifiers, order, etc. +func (s *Script) Value(name string) (ent.Value, error) { + return s.selectValues.Get(name) +} + // QueryScriptToUser queries the "ScriptToUser" edge of the Script entity. func (s *Script) QueryScriptToUser() *UserQuery { - return (&ScriptClient{config: s.config}).QueryScriptToUser(s) + return NewScriptClient(s.config).QueryScriptToUser(s) } // QueryScriptToFinding queries the "ScriptToFinding" edge of the Script entity. func (s *Script) QueryScriptToFinding() *FindingQuery { - return (&ScriptClient{config: s.config}).QueryScriptToFinding(s) + return NewScriptClient(s.config).QueryScriptToFinding(s) } // QueryScriptToEnvironment queries the "ScriptToEnvironment" edge of the Script entity. func (s *Script) QueryScriptToEnvironment() *EnvironmentQuery { - return (&ScriptClient{config: s.config}).QueryScriptToEnvironment(s) + return NewScriptClient(s.config).QueryScriptToEnvironment(s) } // Update returns a builder for updating this Script. // Note that you need to call Script.Unwrap() before calling this method if this Script // was returned from a transaction, and the transaction was committed or rolled back. func (s *Script) Update() *ScriptUpdateOne { - return (&ScriptClient{config: s.config}).UpdateOne(s) + return NewScriptClient(s.config).UpdateOne(s) } // Unwrap unwraps the Script entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (s *Script) Unwrap() *Script { - tx, ok := s.config.driver.(*txDriver) + _tx, ok := s.config.driver.(*txDriver) if !ok { panic("ent: Script is not a transactional entity") } - s.config.driver = tx.drv + s.config.driver = _tx.drv return s } @@ -283,44 +298,99 @@ func (s *Script) Unwrap() *Script { func (s *Script) String() string { var builder strings.Builder builder.WriteString("Script(") - builder.WriteString(fmt.Sprintf("id=%v", s.ID)) - builder.WriteString(", hcl_id=") - builder.WriteString(s.HclID) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", s.ID)) + builder.WriteString("hcl_id=") + builder.WriteString(s.HCLID) + builder.WriteString(", ") + builder.WriteString("name=") builder.WriteString(s.Name) - builder.WriteString(", language=") + builder.WriteString(", ") + builder.WriteString("language=") builder.WriteString(s.Language) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(s.Description) - builder.WriteString(", source=") + builder.WriteString(", ") + builder.WriteString("source=") builder.WriteString(s.Source) - builder.WriteString(", source_type=") + builder.WriteString(", ") + builder.WriteString("source_type=") builder.WriteString(s.SourceType) - builder.WriteString(", cooldown=") + builder.WriteString(", ") + builder.WriteString("cooldown=") builder.WriteString(fmt.Sprintf("%v", s.Cooldown)) - builder.WriteString(", timeout=") + builder.WriteString(", ") + builder.WriteString("timeout=") builder.WriteString(fmt.Sprintf("%v", s.Timeout)) - builder.WriteString(", ignore_errors=") + builder.WriteString(", ") + builder.WriteString("ignore_errors=") builder.WriteString(fmt.Sprintf("%v", s.IgnoreErrors)) - builder.WriteString(", args=") + builder.WriteString(", ") + builder.WriteString("args=") builder.WriteString(fmt.Sprintf("%v", s.Args)) - builder.WriteString(", disabled=") + builder.WriteString(", ") + builder.WriteString("disabled=") builder.WriteString(fmt.Sprintf("%v", s.Disabled)) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", s.Vars)) - builder.WriteString(", abs_path=") + builder.WriteString(", ") + builder.WriteString("abs_path=") builder.WriteString(s.AbsPath) - builder.WriteString(", tags=") + builder.WriteString(", ") + builder.WriteString("tags=") builder.WriteString(fmt.Sprintf("%v", s.Tags)) builder.WriteByte(')') return builder.String() } -// Scripts is a parsable slice of Script. -type Scripts []*Script +// NamedScriptToUser returns the ScriptToUser named value or an error if the edge was not +// loaded in eager-loading with this name. +func (s *Script) NamedScriptToUser(name string) ([]*User, error) { + if s.Edges.namedScriptToUser == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := s.Edges.namedScriptToUser[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (s Scripts) config(cfg config) { - for _i := range s { - s[_i].config = cfg +func (s *Script) appendNamedScriptToUser(name string, edges ...*User) { + if s.Edges.namedScriptToUser == nil { + s.Edges.namedScriptToUser = make(map[string][]*User) + } + if len(edges) == 0 { + s.Edges.namedScriptToUser[name] = []*User{} + } else { + s.Edges.namedScriptToUser[name] = append(s.Edges.namedScriptToUser[name], edges...) } } + +// NamedScriptToFinding returns the ScriptToFinding named value or an error if the edge was not +// loaded in eager-loading with this name. 
+func (s *Script) NamedScriptToFinding(name string) ([]*Finding, error) { + if s.Edges.namedScriptToFinding == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := s.Edges.namedScriptToFinding[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (s *Script) appendNamedScriptToFinding(name string, edges ...*Finding) { + if s.Edges.namedScriptToFinding == nil { + s.Edges.namedScriptToFinding = make(map[string][]*Finding) + } + if len(edges) == 0 { + s.Edges.namedScriptToFinding[name] = []*Finding{} + } else { + s.Edges.namedScriptToFinding[name] = append(s.Edges.namedScriptToFinding[name], edges...) + } +} + +// Scripts is a parsable slice of Script. +type Scripts []*Script diff --git a/ent/script/script.go b/ent/script/script.go index 9c0a0cac..d02f46db 100755 --- a/ent/script/script.go +++ b/ent/script/script.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package script import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -11,8 +13,8 @@ const ( Label = "script" // FieldID holds the string denoting the id field in the database. FieldID = "id" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // FieldName holds the string denoting the name field in the database. FieldName = "name" // FieldLanguage holds the string denoting the language field in the database. @@ -73,7 +75,7 @@ const ( // Columns holds all SQL columns for script fields. var Columns = []string{ FieldID, - FieldHclID, + FieldHCLID, FieldName, FieldLanguage, FieldDescription, @@ -114,3 +116,122 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Script queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByLanguage orders the results by the language field. +func ByLanguage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLanguage, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// BySource orders the results by the source field. +func BySource(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSource, opts...).ToFunc() +} + +// BySourceType orders the results by the source_type field. +func BySourceType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceType, opts...).ToFunc() +} + +// ByCooldown orders the results by the cooldown field. +func ByCooldown(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCooldown, opts...).ToFunc() +} + +// ByTimeout orders the results by the timeout field. 
+func ByTimeout(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTimeout, opts...).ToFunc() +} + +// ByIgnoreErrors orders the results by the ignore_errors field. +func ByIgnoreErrors(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIgnoreErrors, opts...).ToFunc() +} + +// ByDisabled orders the results by the disabled field. +func ByDisabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisabled, opts...).ToFunc() +} + +// ByAbsPath orders the results by the abs_path field. +func ByAbsPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAbsPath, opts...).ToFunc() +} + +// ByScriptToUserCount orders the results by ScriptToUser count. +func ByScriptToUserCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newScriptToUserStep(), opts...) + } +} + +// ByScriptToUser orders the results by ScriptToUser terms. +func ByScriptToUser(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newScriptToUserStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByScriptToFindingCount orders the results by ScriptToFinding count. +func ByScriptToFindingCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newScriptToFindingStep(), opts...) + } +} + +// ByScriptToFinding orders the results by ScriptToFinding terms. +func ByScriptToFinding(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newScriptToFindingStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByScriptToEnvironmentField orders the results by ScriptToEnvironment field. +func ByScriptToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newScriptToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} +func newScriptToUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ScriptToUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ScriptToUserTable, ScriptToUserColumn), + ) +} +func newScriptToFindingStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ScriptToFindingInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ScriptToFindingTable, ScriptToFindingColumn), + ) +} +func newScriptToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ScriptToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ScriptToEnvironmentTable, ScriptToEnvironmentColumn), + ) +} diff --git a/ent/script/where.go b/ent/script/where.go index 69c46342..583adf30 100755 --- a/ent/script/where.go +++ b/ent/script/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package script @@ -11,1119 +11,657 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. 
func IDEQ(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Script(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Script(sql.FieldLTE(FieldID, id)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.Script { + return predicate.Script(sql.FieldEQ(FieldHCLID, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldEQ(FieldName, v)) } // Language applies equality check predicate on the "language" field. It's identical to LanguageEQ. func Language(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldEQ(FieldLanguage, v)) } // Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. 
func Description(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldEQ(FieldDescription, v)) } // Source applies equality check predicate on the "source" field. It's identical to SourceEQ. func Source(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldEQ(FieldSource, v)) } // SourceType applies equality check predicate on the "source_type" field. It's identical to SourceTypeEQ. func SourceType(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldEQ(FieldSourceType, v)) } // Cooldown applies equality check predicate on the "cooldown" field. It's identical to CooldownEQ. func Cooldown(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldEQ(FieldCooldown, v)) } // Timeout applies equality check predicate on the "timeout" field. It's identical to TimeoutEQ. func Timeout(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldEQ(FieldTimeout, v)) } // IgnoreErrors applies equality check predicate on the "ignore_errors" field. It's identical to IgnoreErrorsEQ. func IgnoreErrors(v bool) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIgnoreErrors), v)) - }) + return predicate.Script(sql.FieldEQ(FieldIgnoreErrors, v)) } // Disabled applies equality check predicate on the "disabled" field. It's identical to DisabledEQ. func Disabled(v bool) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.Script(sql.FieldEQ(FieldDisabled, v)) } // AbsPath applies equality check predicate on the "abs_path" field. It's identical to AbsPathEQ. func AbsPath(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldEQ(FieldAbsPath, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.Script { + return predicate.Script(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.Script { + return predicate.Script(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. -func HclIDIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. 
+func HCLIDIn(vs ...string) predicate.Script { + return predicate.Script(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.Script { + return predicate.Script(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.Script { + return predicate.Script(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.Script { + return predicate.Script(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.Script { + return predicate.Script(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.Script { + return predicate.Script(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.Script { + return predicate.Script(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.Script { + return predicate.Script(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. -func HclIDHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.Script { + return predicate.Script(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. 
-func HclIDEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.Script { + return predicate.Script(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.Script { + return predicate.Script(sql.FieldContainsFold(FieldHCLID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Script(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. 
func NameContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Script(sql.FieldContainsFold(FieldName, v)) } // LanguageEQ applies the EQ predicate on the "language" field. func LanguageEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldEQ(FieldLanguage, v)) } // LanguageNEQ applies the NEQ predicate on the "language" field. func LanguageNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldLanguage, v)) } // LanguageIn applies the In predicate on the "language" field. func LanguageIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLanguage), v...)) - }) + return predicate.Script(sql.FieldIn(FieldLanguage, vs...)) } // LanguageNotIn applies the NotIn predicate on the "language" field. func LanguageNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLanguage), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldLanguage, vs...)) } // LanguageGT applies the GT predicate on the "language" field. func LanguageGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldGT(FieldLanguage, v)) } // LanguageGTE applies the GTE predicate on the "language" field. 
func LanguageGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldGTE(FieldLanguage, v)) } // LanguageLT applies the LT predicate on the "language" field. func LanguageLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldLT(FieldLanguage, v)) } // LanguageLTE applies the LTE predicate on the "language" field. func LanguageLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldLTE(FieldLanguage, v)) } // LanguageContains applies the Contains predicate on the "language" field. func LanguageContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldContains(FieldLanguage, v)) } // LanguageHasPrefix applies the HasPrefix predicate on the "language" field. func LanguageHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldHasPrefix(FieldLanguage, v)) } // LanguageHasSuffix applies the HasSuffix predicate on the "language" field. func LanguageHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldHasSuffix(FieldLanguage, v)) } // LanguageEqualFold applies the EqualFold predicate on the "language" field. func LanguageEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldEqualFold(FieldLanguage, v)) } // LanguageContainsFold applies the ContainsFold predicate on the "language" field. func LanguageContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldLanguage), v)) - }) + return predicate.Script(sql.FieldContainsFold(FieldLanguage, v)) } // DescriptionEQ applies the EQ predicate on the "description" field. func DescriptionEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldEQ(FieldDescription, v)) } // DescriptionNEQ applies the NEQ predicate on the "description" field. func DescriptionNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldDescription, v)) } // DescriptionIn applies the In predicate on the "description" field. func DescriptionIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldDescription), v...)) - }) + return predicate.Script(sql.FieldIn(FieldDescription, vs...)) } // DescriptionNotIn applies the NotIn predicate on the "description" field. 
func DescriptionNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldDescription), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldDescription, vs...)) } // DescriptionGT applies the GT predicate on the "description" field. func DescriptionGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldGT(FieldDescription, v)) } // DescriptionGTE applies the GTE predicate on the "description" field. func DescriptionGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldGTE(FieldDescription, v)) } // DescriptionLT applies the LT predicate on the "description" field. func DescriptionLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldLT(FieldDescription, v)) } // DescriptionLTE applies the LTE predicate on the "description" field. func DescriptionLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldLTE(FieldDescription, v)) } // DescriptionContains applies the Contains predicate on the "description" field. func DescriptionContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldContains(FieldDescription, v)) } // DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. func DescriptionHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldHasPrefix(FieldDescription, v)) } // DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. func DescriptionHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldHasSuffix(FieldDescription, v)) } // DescriptionEqualFold applies the EqualFold predicate on the "description" field. func DescriptionEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldEqualFold(FieldDescription, v)) } // DescriptionContainsFold applies the ContainsFold predicate on the "description" field. func DescriptionContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldDescription), v)) - }) + return predicate.Script(sql.FieldContainsFold(FieldDescription, v)) } // SourceEQ applies the EQ predicate on the "source" field. func SourceEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldEQ(FieldSource, v)) } // SourceNEQ applies the NEQ predicate on the "source" field. 
func SourceNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldSource, v)) } // SourceIn applies the In predicate on the "source" field. func SourceIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSource), v...)) - }) + return predicate.Script(sql.FieldIn(FieldSource, vs...)) } // SourceNotIn applies the NotIn predicate on the "source" field. func SourceNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSource), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldSource, vs...)) } // SourceGT applies the GT predicate on the "source" field. func SourceGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldGT(FieldSource, v)) } // SourceGTE applies the GTE predicate on the "source" field. func SourceGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldGTE(FieldSource, v)) } // SourceLT applies the LT predicate on the "source" field. func SourceLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldLT(FieldSource, v)) } // SourceLTE applies the LTE predicate on the "source" field. func SourceLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldLTE(FieldSource, v)) } // SourceContains applies the Contains predicate on the "source" field. func SourceContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldContains(FieldSource, v)) } // SourceHasPrefix applies the HasPrefix predicate on the "source" field. func SourceHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldHasPrefix(FieldSource, v)) } // SourceHasSuffix applies the HasSuffix predicate on the "source" field. func SourceHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldHasSuffix(FieldSource, v)) } // SourceEqualFold applies the EqualFold predicate on the "source" field. func SourceEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldEqualFold(FieldSource, v)) } // SourceContainsFold applies the ContainsFold predicate on the "source" field. 
func SourceContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSource), v)) - }) + return predicate.Script(sql.FieldContainsFold(FieldSource, v)) } // SourceTypeEQ applies the EQ predicate on the "source_type" field. func SourceTypeEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldEQ(FieldSourceType, v)) } // SourceTypeNEQ applies the NEQ predicate on the "source_type" field. func SourceTypeNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldSourceType, v)) } // SourceTypeIn applies the In predicate on the "source_type" field. func SourceTypeIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldSourceType), v...)) - }) + return predicate.Script(sql.FieldIn(FieldSourceType, vs...)) } // SourceTypeNotIn applies the NotIn predicate on the "source_type" field. func SourceTypeNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldSourceType), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldSourceType, vs...)) } // SourceTypeGT applies the GT predicate on the "source_type" field. func SourceTypeGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldGT(FieldSourceType, v)) } // SourceTypeGTE applies the GTE predicate on the "source_type" field. func SourceTypeGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldGTE(FieldSourceType, v)) } // SourceTypeLT applies the LT predicate on the "source_type" field. func SourceTypeLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldLT(FieldSourceType, v)) } // SourceTypeLTE applies the LTE predicate on the "source_type" field. func SourceTypeLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldLTE(FieldSourceType, v)) } // SourceTypeContains applies the Contains predicate on the "source_type" field. func SourceTypeContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldContains(FieldSourceType, v)) } // SourceTypeHasPrefix applies the HasPrefix predicate on the "source_type" field. 
func SourceTypeHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldHasPrefix(FieldSourceType, v)) } // SourceTypeHasSuffix applies the HasSuffix predicate on the "source_type" field. func SourceTypeHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldHasSuffix(FieldSourceType, v)) } // SourceTypeEqualFold applies the EqualFold predicate on the "source_type" field. func SourceTypeEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldEqualFold(FieldSourceType, v)) } // SourceTypeContainsFold applies the ContainsFold predicate on the "source_type" field. func SourceTypeContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceType), v)) - }) + return predicate.Script(sql.FieldContainsFold(FieldSourceType, v)) } // CooldownEQ applies the EQ predicate on the "cooldown" field. func CooldownEQ(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldEQ(FieldCooldown, v)) } // CooldownNEQ applies the NEQ predicate on the "cooldown" field. func CooldownNEQ(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldCooldown, v)) } // CooldownIn applies the In predicate on the "cooldown" field. func CooldownIn(vs ...int) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldCooldown), v...)) - }) + return predicate.Script(sql.FieldIn(FieldCooldown, vs...)) } // CooldownNotIn applies the NotIn predicate on the "cooldown" field. func CooldownNotIn(vs ...int) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldCooldown), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldCooldown, vs...)) } // CooldownGT applies the GT predicate on the "cooldown" field. func CooldownGT(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldGT(FieldCooldown, v)) } // CooldownGTE applies the GTE predicate on the "cooldown" field. func CooldownGTE(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldGTE(FieldCooldown, v)) } // CooldownLT applies the LT predicate on the "cooldown" field. 
func CooldownLT(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldLT(FieldCooldown, v)) } // CooldownLTE applies the LTE predicate on the "cooldown" field. func CooldownLTE(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCooldown), v)) - }) + return predicate.Script(sql.FieldLTE(FieldCooldown, v)) } // TimeoutEQ applies the EQ predicate on the "timeout" field. func TimeoutEQ(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldEQ(FieldTimeout, v)) } // TimeoutNEQ applies the NEQ predicate on the "timeout" field. func TimeoutNEQ(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldTimeout, v)) } // TimeoutIn applies the In predicate on the "timeout" field. func TimeoutIn(vs ...int) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTimeout), v...)) - }) + return predicate.Script(sql.FieldIn(FieldTimeout, vs...)) } // TimeoutNotIn applies the NotIn predicate on the "timeout" field. func TimeoutNotIn(vs ...int) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTimeout), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldTimeout, vs...)) } // TimeoutGT applies the GT predicate on the "timeout" field. func TimeoutGT(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldGT(FieldTimeout, v)) } // TimeoutGTE applies the GTE predicate on the "timeout" field. func TimeoutGTE(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldGTE(FieldTimeout, v)) } // TimeoutLT applies the LT predicate on the "timeout" field. func TimeoutLT(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldLT(FieldTimeout, v)) } // TimeoutLTE applies the LTE predicate on the "timeout" field. func TimeoutLTE(v int) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTimeout), v)) - }) + return predicate.Script(sql.FieldLTE(FieldTimeout, v)) } // IgnoreErrorsEQ applies the EQ predicate on the "ignore_errors" field. func IgnoreErrorsEQ(v bool) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIgnoreErrors), v)) - }) + return predicate.Script(sql.FieldEQ(FieldIgnoreErrors, v)) } // IgnoreErrorsNEQ applies the NEQ predicate on the "ignore_errors" field. 
func IgnoreErrorsNEQ(v bool) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIgnoreErrors), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldIgnoreErrors, v)) } // DisabledEQ applies the EQ predicate on the "disabled" field. func DisabledEQ(v bool) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldDisabled), v)) - }) + return predicate.Script(sql.FieldEQ(FieldDisabled, v)) } // DisabledNEQ applies the NEQ predicate on the "disabled" field. func DisabledNEQ(v bool) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldDisabled), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldDisabled, v)) } // AbsPathEQ applies the EQ predicate on the "abs_path" field. func AbsPathEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldEQ(FieldAbsPath, v)) } // AbsPathNEQ applies the NEQ predicate on the "abs_path" field. func AbsPathNEQ(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldNEQ(FieldAbsPath, v)) } // AbsPathIn applies the In predicate on the "abs_path" field. func AbsPathIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldAbsPath), v...)) - }) + return predicate.Script(sql.FieldIn(FieldAbsPath, vs...)) } // AbsPathNotIn applies the NotIn predicate on the "abs_path" field. func AbsPathNotIn(vs ...string) predicate.Script { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Script(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldAbsPath), v...)) - }) + return predicate.Script(sql.FieldNotIn(FieldAbsPath, vs...)) } // AbsPathGT applies the GT predicate on the "abs_path" field. func AbsPathGT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldGT(FieldAbsPath, v)) } // AbsPathGTE applies the GTE predicate on the "abs_path" field. func AbsPathGTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldGTE(FieldAbsPath, v)) } // AbsPathLT applies the LT predicate on the "abs_path" field. func AbsPathLT(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldLT(FieldAbsPath, v)) } // AbsPathLTE applies the LTE predicate on the "abs_path" field. func AbsPathLTE(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldLTE(FieldAbsPath, v)) } // AbsPathContains applies the Contains predicate on the "abs_path" field. 
func AbsPathContains(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldContains(FieldAbsPath, v)) } // AbsPathHasPrefix applies the HasPrefix predicate on the "abs_path" field. func AbsPathHasPrefix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldHasPrefix(FieldAbsPath, v)) } // AbsPathHasSuffix applies the HasSuffix predicate on the "abs_path" field. func AbsPathHasSuffix(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldHasSuffix(FieldAbsPath, v)) } // AbsPathEqualFold applies the EqualFold predicate on the "abs_path" field. func AbsPathEqualFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldEqualFold(FieldAbsPath, v)) } // AbsPathContainsFold applies the ContainsFold predicate on the "abs_path" field. func AbsPathContainsFold(v string) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAbsPath), v)) - }) + return predicate.Script(sql.FieldContainsFold(FieldAbsPath, v)) } // HasScriptToUser applies the HasEdge predicate on the "ScriptToUser" edge. @@ -1131,7 +669,6 @@ func HasScriptToUser() predicate.Script { return predicate.Script(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ScriptToUserTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, ScriptToUserTable, ScriptToUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1141,11 +678,7 @@ func HasScriptToUser() predicate.Script { // HasScriptToUserWith applies the HasEdge predicate on the "ScriptToUser" edge with a given conditions (other predicates). func HasScriptToUserWith(preds ...predicate.User) predicate.Script { return predicate.Script(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ScriptToUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ScriptToUserTable, ScriptToUserColumn), - ) + step := newScriptToUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1159,7 +692,6 @@ func HasScriptToFinding() predicate.Script { return predicate.Script(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ScriptToFindingTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, ScriptToFindingTable, ScriptToFindingColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1169,11 +701,7 @@ func HasScriptToFinding() predicate.Script { // HasScriptToFindingWith applies the HasEdge predicate on the "ScriptToFinding" edge with a given conditions (other predicates). 
func HasScriptToFindingWith(preds ...predicate.Finding) predicate.Script { return predicate.Script(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ScriptToFindingInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ScriptToFindingTable, ScriptToFindingColumn), - ) + step := newScriptToFindingStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1187,7 +715,6 @@ func HasScriptToEnvironment() predicate.Script { return predicate.Script(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ScriptToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, ScriptToEnvironmentTable, ScriptToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1197,11 +724,7 @@ func HasScriptToEnvironment() predicate.Script { // HasScriptToEnvironmentWith applies the HasEdge predicate on the "ScriptToEnvironment" edge with a given conditions (other predicates). func HasScriptToEnvironmentWith(preds ...predicate.Environment) predicate.Script { return predicate.Script(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ScriptToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, ScriptToEnvironmentTable, ScriptToEnvironmentColumn), - ) + step := newScriptToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1212,32 +735,15 @@ func HasScriptToEnvironmentWith(preds ...predicate.Environment) predicate.Script // And groups predicates with the AND operator between them. func And(predicates ...predicate.Script) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Script(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Script) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Script(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Script) predicate.Script { - return predicate.Script(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Script(sql.NotPredicates(p)) } diff --git a/ent/script_create.go b/ent/script_create.go index dd67ca75..c617fcb9 100755 --- a/ent/script_create.go +++ b/ent/script_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -23,9 +23,9 @@ type ScriptCreate struct { hooks []Hook } -// SetHclID sets the "hcl_id" field. -func (sc *ScriptCreate) SetHclID(s string) *ScriptCreate { - sc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (sc *ScriptCreate) SetHCLID(s string) *ScriptCreate { + sc.mutation.SetHCLID(s) return sc } @@ -177,44 +177,8 @@ func (sc *ScriptCreate) Mutation() *ScriptMutation { // Save creates the Script in the database. 
func (sc *ScriptCreate) Save(ctx context.Context) (*Script, error) { - var ( - err error - node *Script - ) sc.defaults() - if len(sc.hooks) == 0 { - if err = sc.check(); err != nil { - return nil, err - } - node, err = sc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ScriptMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = sc.check(); err != nil { - return nil, err - } - sc.mutation = mutation - if node, err = sc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(sc.hooks) - 1; i >= 0; i-- { - if sc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = sc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, sc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, sc.sqlSave, sc.mutation, sc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -249,7 +213,7 @@ func (sc *ScriptCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (sc *ScriptCreate) check() error { - if _, ok := sc.mutation.HclID(); !ok { + if _, ok := sc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "Script.hcl_id"`)} } if _, ok := sc.mutation.Name(); !ok { @@ -295,10 +259,13 @@ func (sc *ScriptCreate) check() error { } func (sc *ScriptCreate) sqlSave(ctx context.Context) (*Script, error) { + if err := sc.check(); err != nil { + return nil, err + } _node, _spec := sc.createSpec() if err := sqlgraph.CreateNode(ctx, sc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -309,134 +276,74 @@ func (sc *ScriptCreate) sqlSave(ctx context.Context) (*Script, error) { return nil, err } } + sc.mutation.id = &_node.ID + sc.mutation.done = true return _node, nil } func (sc *ScriptCreate) createSpec() (*Script, *sqlgraph.CreateSpec) { var ( _node = &Script{config: sc.config} - _spec = &sqlgraph.CreateSpec{ - Table: script.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(script.Table, sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID)) ) if id, ok := sc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } - if value, ok := sc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldHclID, - }) - _node.HclID = value + if value, ok := sc.mutation.HCLID(); ok { + _spec.SetField(script.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if value, ok := sc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldName, - }) + _spec.SetField(script.FieldName, field.TypeString, value) _node.Name = value } if value, ok := sc.mutation.Language(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldLanguage, - }) + _spec.SetField(script.FieldLanguage, field.TypeString, value) _node.Language = value } if value, ok := sc.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: 
script.FieldDescription, - }) + _spec.SetField(script.FieldDescription, field.TypeString, value) _node.Description = value } if value, ok := sc.mutation.Source(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldSource, - }) + _spec.SetField(script.FieldSource, field.TypeString, value) _node.Source = value } if value, ok := sc.mutation.SourceType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldSourceType, - }) + _spec.SetField(script.FieldSourceType, field.TypeString, value) _node.SourceType = value } if value, ok := sc.mutation.Cooldown(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldCooldown, - }) + _spec.SetField(script.FieldCooldown, field.TypeInt, value) _node.Cooldown = value } if value, ok := sc.mutation.Timeout(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldTimeout, - }) + _spec.SetField(script.FieldTimeout, field.TypeInt, value) _node.Timeout = value } if value, ok := sc.mutation.IgnoreErrors(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: script.FieldIgnoreErrors, - }) + _spec.SetField(script.FieldIgnoreErrors, field.TypeBool, value) _node.IgnoreErrors = value } if value, ok := sc.mutation.Args(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldArgs, - }) + _spec.SetField(script.FieldArgs, field.TypeJSON, value) _node.Args = value } if value, ok := sc.mutation.Disabled(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: script.FieldDisabled, - }) + _spec.SetField(script.FieldDisabled, field.TypeBool, value) _node.Disabled = value } if value, ok := sc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldVars, - }) + _spec.SetField(script.FieldVars, field.TypeJSON, value) _node.Vars = value } if value, ok := sc.mutation.AbsPath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldAbsPath, - }) + _spec.SetField(script.FieldAbsPath, field.TypeString, value) _node.AbsPath = value } if value, ok := sc.mutation.Tags(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldTags, - }) + _spec.SetField(script.FieldTags, field.TypeJSON, value) _node.Tags = value } if nodes := sc.mutation.ScriptToUserIDs(); len(nodes) > 0 { @@ -447,10 +354,7 @@ func (sc *ScriptCreate) createSpec() (*Script, *sqlgraph.CreateSpec) { Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -466,10 +370,7 @@ func (sc *ScriptCreate) createSpec() (*Script, *sqlgraph.CreateSpec) { Columns: []string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range 
nodes { @@ -485,10 +386,7 @@ func (sc *ScriptCreate) createSpec() (*Script, *sqlgraph.CreateSpec) { Columns: []string{script.ScriptToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -503,11 +401,15 @@ func (sc *ScriptCreate) createSpec() (*Script, *sqlgraph.CreateSpec) { // ScriptCreateBulk is the builder for creating many Script entities in bulk. type ScriptCreateBulk struct { config + err error builders []*ScriptCreate } // Save creates the Script entities in the database. func (scb *ScriptCreateBulk) Save(ctx context.Context) ([]*Script, error) { + if scb.err != nil { + return nil, scb.err + } specs := make([]*sqlgraph.CreateSpec, len(scb.builders)) nodes := make([]*Script, len(scb.builders)) mutators := make([]Mutator, len(scb.builders)) @@ -524,8 +426,8 @@ func (scb *ScriptCreateBulk) Save(ctx context.Context) ([]*Script, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, scb.builders[i+1].mutation) } else { @@ -533,7 +435,7 @@ func (scb *ScriptCreateBulk) Save(ctx context.Context) ([]*Script, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, scb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/script_delete.go b/ent/script_delete.go index 312075ea..25fd65ab 100755 --- a/ent/script_delete.go +++ b/ent/script_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (sd *ScriptDelete) Where(ps ...predicate.Script) *ScriptDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (sd *ScriptDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(sd.hooks) == 0 { - affected, err = sd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ScriptMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - sd.mutation = mutation - affected, err = sd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(sd.hooks) - 1; i >= 0; i-- { - if sd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = sd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, sd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, sd.sqlExec, sd.mutation, sd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
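The ScriptDelete.Exec rewrite above delegates to the shared withHooks helper, and the predicate file earlier in this diff now emits sql.Field* helpers instead of hand-rolled selector closures. A minimal sketch of how calling code exercises both follows, assuming an already-constructed laforge ent client; the package name, function name, and the "local" source_type value are illustrative assumptions, not taken from this diff.

// Illustrative sketch, not part of the generated diff. It assumes an
// initialized *ent.Client and context; the "local" source_type value and the
// helper name below are made up for the example.
package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/script"
)

// pruneDisabledLocalScripts deletes every disabled script whose source_type
// is "local" and returns how many rows were removed. Any hooks registered on
// the Script mutation run automatically through the withHooks path shown above.
func pruneDisabledLocalScripts(ctx context.Context, client *ent.Client) (int, error) {
	return client.Script.Delete().
		Where(
			script.SourceTypeEQ("local"),
			script.DisabledEQ(true),
		).
		Exec(ctx)
}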
@@ -68,15 +40,7 @@ func (sd *ScriptDelete) ExecX(ctx context.Context) int { } func (sd *ScriptDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: script.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(script.Table, sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID)) if ps := sd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (sd *ScriptDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, sd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, sd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + sd.mutation.done = true + return affected, err } // ScriptDeleteOne is the builder for deleting a single Script entity. @@ -92,6 +61,12 @@ type ScriptDeleteOne struct { sd *ScriptDelete } +// Where appends a list predicates to the ScriptDelete builder. +func (sdo *ScriptDeleteOne) Where(ps ...predicate.Script) *ScriptDeleteOne { + sdo.sd.mutation.Where(ps...) + return sdo +} + // Exec executes the deletion query. func (sdo *ScriptDeleteOne) Exec(ctx context.Context) error { n, err := sdo.sd.Exec(ctx) @@ -107,5 +82,7 @@ func (sdo *ScriptDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (sdo *ScriptDeleteOne) ExecX(ctx context.Context) { - sdo.sd.ExecX(ctx) + if err := sdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/script_query.go b/ent/script_query.go index a5f3ba97..47f80cc0 100755 --- a/ent/script_query.go +++ b/ent/script_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -23,17 +22,18 @@ import ( // ScriptQuery is the builder for querying Script entities. type ScriptQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Script - // eager-loading edges. - withScriptToUser *UserQuery - withScriptToFinding *FindingQuery - withScriptToEnvironment *EnvironmentQuery - withFKs bool + ctx *QueryContext + order []script.OrderOption + inters []Interceptor + predicates []predicate.Script + withScriptToUser *UserQuery + withScriptToFinding *FindingQuery + withScriptToEnvironment *EnvironmentQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Script) error + withNamedScriptToUser map[string]*UserQuery + withNamedScriptToFinding map[string]*FindingQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -45,34 +45,34 @@ func (sq *ScriptQuery) Where(ps ...predicate.Script) *ScriptQuery { return sq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (sq *ScriptQuery) Limit(limit int) *ScriptQuery { - sq.limit = &limit + sq.ctx.Limit = &limit return sq } -// Offset adds an offset step to the query. +// Offset to start from. func (sq *ScriptQuery) Offset(offset int) *ScriptQuery { - sq.offset = &offset + sq.ctx.Offset = &offset return sq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
func (sq *ScriptQuery) Unique(unique bool) *ScriptQuery { - sq.unique = &unique + sq.ctx.Unique = &unique return sq } -// Order adds an order step to the query. -func (sq *ScriptQuery) Order(o ...OrderFunc) *ScriptQuery { +// Order specifies how the records should be ordered. +func (sq *ScriptQuery) Order(o ...script.OrderOption) *ScriptQuery { sq.order = append(sq.order, o...) return sq } // QueryScriptToUser chains the current query on the "ScriptToUser" edge. func (sq *ScriptQuery) QueryScriptToUser() *UserQuery { - query := &UserQuery{config: sq.config} + query := (&UserClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -94,7 +94,7 @@ func (sq *ScriptQuery) QueryScriptToUser() *UserQuery { // QueryScriptToFinding chains the current query on the "ScriptToFinding" edge. func (sq *ScriptQuery) QueryScriptToFinding() *FindingQuery { - query := &FindingQuery{config: sq.config} + query := (&FindingClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -116,7 +116,7 @@ func (sq *ScriptQuery) QueryScriptToFinding() *FindingQuery { // QueryScriptToEnvironment chains the current query on the "ScriptToEnvironment" edge. func (sq *ScriptQuery) QueryScriptToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: sq.config} + query := (&EnvironmentClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -139,7 +139,7 @@ func (sq *ScriptQuery) QueryScriptToEnvironment() *EnvironmentQuery { // First returns the first Script entity from the query. // Returns a *NotFoundError when no Script was found. func (sq *ScriptQuery) First(ctx context.Context) (*Script, error) { - nodes, err := sq.Limit(1).All(ctx) + nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, "First")) if err != nil { return nil, err } @@ -162,7 +162,7 @@ func (sq *ScriptQuery) FirstX(ctx context.Context) *Script { // Returns a *NotFoundError when no Script ID was found. func (sq *ScriptQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = sq.Limit(1).IDs(ctx); err != nil { + if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -185,7 +185,7 @@ func (sq *ScriptQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Script entity is found. // Returns a *NotFoundError when no Script entities are found. func (sq *ScriptQuery) Only(ctx context.Context) (*Script, error) { - nodes, err := sq.Limit(2).All(ctx) + nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, "Only")) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (sq *ScriptQuery) OnlyX(ctx context.Context) *Script { // Returns a *NotFoundError when no entities are found. func (sq *ScriptQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = sq.Limit(2).IDs(ctx); err != nil { + if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -238,10 +238,12 @@ func (sq *ScriptQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Scripts. 
func (sq *ScriptQuery) All(ctx context.Context) ([]*Script, error) { + ctx = setContextOp(ctx, sq.ctx, "All") if err := sq.prepareQuery(ctx); err != nil { return nil, err } - return sq.sqlAll(ctx) + qr := querierAll[[]*Script, *ScriptQuery]() + return withInterceptors[[]*Script](ctx, sq, qr, sq.inters) } // AllX is like All, but panics if an error occurs. @@ -254,9 +256,12 @@ func (sq *ScriptQuery) AllX(ctx context.Context) []*Script { } // IDs executes the query and returns a list of Script IDs. -func (sq *ScriptQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := sq.Select(script.FieldID).Scan(ctx, &ids); err != nil { +func (sq *ScriptQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if sq.ctx.Unique == nil && sq.path != nil { + sq.Unique(true) + } + ctx = setContextOp(ctx, sq.ctx, "IDs") + if err = sq.Select(script.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -273,10 +278,11 @@ func (sq *ScriptQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (sq *ScriptQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, sq.ctx, "Count") if err := sq.prepareQuery(ctx); err != nil { return 0, err } - return sq.sqlCount(ctx) + return withInterceptors[int](ctx, sq, querierCount[*ScriptQuery](), sq.inters) } // CountX is like Count, but panics if an error occurs. @@ -290,10 +296,15 @@ func (sq *ScriptQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (sq *ScriptQuery) Exist(ctx context.Context) (bool, error) { - if err := sq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, sq.ctx, "Exist") + switch _, err := sq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return sq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -313,24 +324,23 @@ func (sq *ScriptQuery) Clone() *ScriptQuery { } return &ScriptQuery{ config: sq.config, - limit: sq.limit, - offset: sq.offset, - order: append([]OrderFunc{}, sq.order...), + ctx: sq.ctx.Clone(), + order: append([]script.OrderOption{}, sq.order...), + inters: append([]Interceptor{}, sq.inters...), predicates: append([]predicate.Script{}, sq.predicates...), withScriptToUser: sq.withScriptToUser.Clone(), withScriptToFinding: sq.withScriptToFinding.Clone(), withScriptToEnvironment: sq.withScriptToEnvironment.Clone(), // clone intermediate query. - sql: sq.sql.Clone(), - path: sq.path, - unique: sq.unique, + sql: sq.sql.Clone(), + path: sq.path, } } // WithScriptToUser tells the query-builder to eager-load the nodes that are connected to // the "ScriptToUser" edge. The optional arguments are used to configure the query builder of the edge. func (sq *ScriptQuery) WithScriptToUser(opts ...func(*UserQuery)) *ScriptQuery { - query := &UserQuery{config: sq.config} + query := (&UserClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -341,7 +351,7 @@ func (sq *ScriptQuery) WithScriptToUser(opts ...func(*UserQuery)) *ScriptQuery { // WithScriptToFinding tells the query-builder to eager-load the nodes that are connected to // the "ScriptToFinding" edge. The optional arguments are used to configure the query builder of the edge. 
func (sq *ScriptQuery) WithScriptToFinding(opts ...func(*FindingQuery)) *ScriptQuery { - query := &FindingQuery{config: sq.config} + query := (&FindingClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -352,7 +362,7 @@ func (sq *ScriptQuery) WithScriptToFinding(opts ...func(*FindingQuery)) *ScriptQ // WithScriptToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "ScriptToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (sq *ScriptQuery) WithScriptToEnvironment(opts ...func(*EnvironmentQuery)) *ScriptQuery { - query := &EnvironmentQuery{config: sq.config} + query := (&EnvironmentClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -366,25 +376,21 @@ func (sq *ScriptQuery) WithScriptToEnvironment(opts ...func(*EnvironmentQuery)) // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Count int `json:"count,omitempty"` // } // // client.Script.Query(). -// GroupBy(script.FieldHclID). +// GroupBy(script.FieldHCLID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (sq *ScriptQuery) GroupBy(field string, fields ...string) *ScriptGroupBy { - group := &ScriptGroupBy{config: sq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := sq.prepareQuery(ctx); err != nil { - return nil, err - } - return sq.sqlQuery(ctx), nil - } - return group + sq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ScriptGroupBy{build: sq} + grbuild.flds = &sq.ctx.Fields + grbuild.label = script.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -393,20 +399,37 @@ func (sq *ScriptQuery) GroupBy(field string, fields ...string) *ScriptGroupBy { // Example: // // var v []struct { -// HclID string `json:"hcl_id,omitempty" hcl:"id,label"` +// HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // } // // client.Script.Query(). -// Select(script.FieldHclID). +// Select(script.FieldHCLID). // Scan(ctx, &v) -// func (sq *ScriptQuery) Select(fields ...string) *ScriptSelect { - sq.fields = append(sq.fields, fields...) - return &ScriptSelect{ScriptQuery: sq} + sq.ctx.Fields = append(sq.ctx.Fields, fields...) + sbuild := &ScriptSelect{ScriptQuery: sq} + sbuild.label = script.Label + sbuild.flds, sbuild.scan = &sq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ScriptSelect configured with the given aggregations. +func (sq *ScriptQuery) Aggregate(fns ...AggregateFunc) *ScriptSelect { + return sq.Select().Aggregate(fns...) 
} func (sq *ScriptQuery) prepareQuery(ctx context.Context) error { - for _, f := range sq.fields { + for _, inter := range sq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, sq); err != nil { + return err + } + } + } + for _, f := range sq.ctx.Fields { if !script.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -421,7 +444,7 @@ func (sq *ScriptQuery) prepareQuery(ctx context.Context) error { return nil } -func (sq *ScriptQuery) sqlAll(ctx context.Context) ([]*Script, error) { +func (sq *ScriptQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Script, error) { var ( nodes = []*Script{} withFKs = sq.withFKs @@ -438,150 +461,185 @@ func (sq *ScriptQuery) sqlAll(ctx context.Context) ([]*Script, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, script.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Script).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Script{config: sq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(sq.modifiers) > 0 { + _spec.Modifiers = sq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, sq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := sq.withScriptToUser; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Script) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ScriptToUser = []*User{} + if err := sq.loadScriptToUser(ctx, query, nodes, + func(n *Script) { n.Edges.ScriptToUser = []*User{} }, + func(n *Script, e *User) { n.Edges.ScriptToUser = append(n.Edges.ScriptToUser, e) }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.User(func(s *sql.Selector) { - s.Where(sql.InValues(script.ScriptToUserColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := sq.withScriptToFinding; query != nil { + if err := sq.loadScriptToFinding(ctx, query, nodes, + func(n *Script) { n.Edges.ScriptToFinding = []*Finding{} }, + func(n *Script, e *Finding) { n.Edges.ScriptToFinding = append(n.Edges.ScriptToFinding, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.script_script_to_user - if fk == nil { - return nil, fmt.Errorf(`foreign-key "script_script_to_user" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "script_script_to_user" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ScriptToUser = append(node.Edges.ScriptToUser, n) + } + if query := sq.withScriptToEnvironment; query != nil { + if err := sq.loadScriptToEnvironment(ctx, query, nodes, nil, + func(n *Script, e *Environment) { n.Edges.ScriptToEnvironment = e }); err != nil { + return nil, err } } - - if query := sq.withScriptToFinding; query != nil { - fks := 
make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Script) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ScriptToFinding = []*Finding{} + for name, query := range sq.withNamedScriptToUser { + if err := sq.loadScriptToUser(ctx, query, nodes, + func(n *Script) { n.appendNamedScriptToUser(name) }, + func(n *Script, e *User) { n.appendNamedScriptToUser(name, e) }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.Finding(func(s *sql.Selector) { - s.Where(sql.InValues(script.ScriptToFindingColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range sq.withNamedScriptToFinding { + if err := sq.loadScriptToFinding(ctx, query, nodes, + func(n *Script) { n.appendNamedScriptToFinding(name) }, + func(n *Script, e *Finding) { n.appendNamedScriptToFinding(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.script_script_to_finding - if fk == nil { - return nil, fmt.Errorf(`foreign-key "script_script_to_finding" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "script_script_to_finding" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ScriptToFinding = append(node.Edges.ScriptToFinding, n) + } + for i := range sq.loadTotal { + if err := sq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := sq.withScriptToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Script) - for i := range nodes { - if nodes[i].environment_environment_to_script == nil { - continue - } - fk := *nodes[i].environment_environment_to_script - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) +func (sq *ScriptQuery) loadScriptToUser(ctx context.Context, query *UserQuery, nodes []*Script, init func(*Script), assign func(*Script, *User)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Script) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + query.withFKs = true + query.Where(predicate.User(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(script.ScriptToUserColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.script_script_to_user + if fk == nil { + return fmt.Errorf(`foreign-key "script_script_to_user" is nil for node %v`, n.ID) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "environment_environment_to_script" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ScriptToEnvironment = n - } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "script_script_to_user" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil +} +func (sq *ScriptQuery) loadScriptToFinding(ctx context.Context, query *FindingQuery, nodes []*Script, init func(*Script), assign func(*Script, *Finding)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Script) + for i := range nodes { + fks = append(fks, nodes[i].ID) + 
nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Finding(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(script.ScriptToFindingColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.script_script_to_finding + if fk == nil { + return fmt.Errorf(`foreign-key "script_script_to_finding" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "script_script_to_finding" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (sq *ScriptQuery) loadScriptToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*Script, init func(*Script), assign func(*Script, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Script) + for i := range nodes { + if nodes[i].environment_environment_to_script == nil { + continue + } + fk := *nodes[i].environment_environment_to_script + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "environment_environment_to_script" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (sq *ScriptQuery) sqlCount(ctx context.Context) (int, error) { _spec := sq.querySpec() - _spec.Node.Columns = sq.fields - if len(sq.fields) > 0 { - _spec.Unique = sq.unique != nil && *sq.unique + if len(sq.modifiers) > 0 { + _spec.Modifiers = sq.modifiers } - return sqlgraph.CountNodes(ctx, sq.driver, _spec) -} - -func (sq *ScriptQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := sq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = sq.ctx.Fields + if len(sq.ctx.Fields) > 0 { + _spec.Unique = sq.ctx.Unique != nil && *sq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, sq.driver, _spec) } func (sq *ScriptQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: script.Table, - Columns: script.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, - }, - From: sq.sql, - Unique: true, - } - if unique := sq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(script.Table, script.Columns, sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID)) + _spec.From = sq.sql + if unique := sq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if sq.path != nil { + _spec.Unique = true } - if fields := sq.fields; len(fields) > 0 { + if fields := sq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, script.FieldID) for i := range fields { @@ -597,10 +655,10 @@ func (sq *ScriptQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := sq.limit; limit != nil { + if limit := sq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := sq.offset; offset != nil { + if offset := sq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := sq.order; len(ps) > 0 { @@ -616,7 +674,7 @@ func (sq *ScriptQuery) querySpec() *sqlgraph.QuerySpec { func (sq *ScriptQuery) 
sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(sq.driver.Dialect()) t1 := builder.Table(script.Table) - columns := sq.fields + columns := sq.ctx.Fields if len(columns) == 0 { columns = script.Columns } @@ -625,7 +683,7 @@ func (sq *ScriptQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = sq.sql selector.Select(selector.Columns(columns...)...) } - if sq.unique != nil && *sq.unique { + if sq.ctx.Unique != nil && *sq.ctx.Unique { selector.Distinct() } for _, p := range sq.predicates { @@ -634,498 +692,128 @@ func (sq *ScriptQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range sq.order { p(selector) } - if offset := sq.offset; offset != nil { + if offset := sq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := sq.limit; limit != nil { + if limit := sq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// ScriptGroupBy is the group-by builder for Script entities. -type ScriptGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (sgb *ScriptGroupBy) Aggregate(fns ...AggregateFunc) *ScriptGroupBy { - sgb.fns = append(sgb.fns, fns...) - return sgb -} - -// Scan applies the group-by query and scans the result into the given value. -func (sgb *ScriptGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := sgb.path(ctx) - if err != nil { - return err - } - sgb.sql = query - return sgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (sgb *ScriptGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := sgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: ScriptGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (sgb *ScriptGroupBy) StringsX(ctx context.Context) []string { - v, err := sgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = sgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (sgb *ScriptGroupBy) StringX(ctx context.Context) string { - v, err := sgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (sgb *ScriptGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: ScriptGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (sgb *ScriptGroupBy) IntsX(ctx context.Context) []int { - v, err := sgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = sgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (sgb *ScriptGroupBy) IntX(ctx context.Context) int { - v, err := sgb.Int(ctx) - if err != nil { - panic(err) +// WithNamedScriptToUser tells the query-builder to eager-load the nodes that are connected to the "ScriptToUser" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (sq *ScriptQuery) WithNamedScriptToUser(name string, opts ...func(*UserQuery)) *ScriptQuery { + query := (&UserClient{config: sq.config}).Query() + for _, opt := range opts { + opt(query) } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: ScriptGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err + if sq.withNamedScriptToUser == nil { + sq.withNamedScriptToUser = make(map[string]*UserQuery) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (sgb *ScriptGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := sgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v + sq.withNamedScriptToUser[name] = query + return sq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = sgb.Float64s(ctx); err != nil { - return +// WithNamedScriptToFinding tells the query-builder to eager-load the nodes that are connected to the "ScriptToFinding" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (sq *ScriptQuery) WithNamedScriptToFinding(name string, opts ...func(*FindingQuery)) *ScriptQuery { + query := (&FindingClient{config: sq.config}).Query() + for _, opt := range opts { + opt(query) } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptGroupBy.Float64s returned %d results when one was expected", len(v)) + if sq.withNamedScriptToFinding == nil { + sq.withNamedScriptToFinding = make(map[string]*FindingQuery) } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
-func (sgb *ScriptGroupBy) Float64X(ctx context.Context) float64 { - v, err := sgb.Float64(ctx) - if err != nil { - panic(err) - } - return v + sq.withNamedScriptToFinding[name] = query + return sq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: ScriptGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// ScriptGroupBy is the group-by builder for Script entities. +type ScriptGroupBy struct { + selector + build *ScriptQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (sgb *ScriptGroupBy) BoolsX(ctx context.Context) []bool { - v, err := sgb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (sgb *ScriptGroupBy) Aggregate(fns ...AggregateFunc) *ScriptGroupBy { + sgb.fns = append(sgb.fns, fns...) + return sgb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *ScriptGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = sgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (sgb *ScriptGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sgb.build.ctx, "GroupBy") + if err := sgb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*ScriptQuery, *ScriptGroupBy](ctx, sgb.build, sgb, sgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (sgb *ScriptGroupBy) BoolX(ctx context.Context) bool { - v, err := sgb.Bool(ctx) - if err != nil { - panic(err) +func (sgb *ScriptGroupBy) sqlScan(ctx context.Context, root *ScriptQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(sgb.fns)) + for _, fn := range sgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (sgb *ScriptGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range sgb.fields { - if !script.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*sgb.flds)+len(sgb.fns)) + for _, f := range *sgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := sgb.sqlQuery() + selector.GroupBy(selector.Columns(*sgb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := sgb.driver.Query(ctx, query, args, rows); err != nil { + if err := sgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (sgb *ScriptGroupBy) sqlQuery() *sql.Selector { - selector := sgb.sql.Select() - aggregation := make([]string, 0, len(sgb.fns)) - for _, fn := range sgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(sgb.fields)+len(sgb.fns)) - for _, f := range sgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(sgb.fields...)...) -} - // ScriptSelect is the builder for selecting fields of Script entities. type ScriptSelect struct { *ScriptQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ss *ScriptSelect) Aggregate(fns ...AggregateFunc) *ScriptSelect { + ss.fns = append(ss.fns, fns...) + return ss } // Scan applies the selector query and scans the result into the given value. -func (ss *ScriptSelect) Scan(ctx context.Context, v interface{}) error { +func (ss *ScriptSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ss.ctx, "Select") if err := ss.prepareQuery(ctx); err != nil { return err } - ss.sql = ss.ScriptQuery.sqlQuery(ctx) - return ss.sqlScan(ctx, v) + return scanWithInterceptors[*ScriptQuery, *ScriptSelect](ctx, ss.ScriptQuery, ss, ss.inters, v) } -// ScanX is like Scan, but panics if an error occurs. -func (ss *ScriptSelect) ScanX(ctx context.Context, v interface{}) { - if err := ss.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ss *ScriptSelect) Strings(ctx context.Context) ([]string, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: ScriptSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ss *ScriptSelect) StringsX(ctx context.Context) []string { - v, err := ss.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ss *ScriptSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ss.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ss *ScriptSelect) StringX(ctx context.Context) string { - v, err := ss.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
-func (ss *ScriptSelect) Ints(ctx context.Context) ([]int, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: ScriptSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ss *ScriptSelect) IntsX(ctx context.Context) []int { - v, err := ss.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ss *ScriptSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ss.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ss *ScriptSelect) IntX(ctx context.Context) int { - v, err := ss.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ss *ScriptSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: ScriptSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ss *ScriptSelect) Float64sX(ctx context.Context) []float64 { - v, err := ss.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ss *ScriptSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ss.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ss *ScriptSelect) Float64X(ctx context.Context) float64 { - v, err := ss.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ss *ScriptSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: ScriptSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ss *ScriptSelect) BoolsX(ctx context.Context) []bool { - v, err := ss.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (ss *ScriptSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ss.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{script.Label} - default: - err = fmt.Errorf("ent: ScriptSelect.Bools returned %d results when one was expected", len(v)) +func (ss *ScriptSelect) sqlScan(ctx context.Context, root *ScriptQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ss.fns)) + for _, fn := range ss.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ss *ScriptSelect) BoolX(ctx context.Context) bool { - v, err := ss.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ss.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ss *ScriptSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ss.sql.Query() + query, args := selector.Query() if err := ss.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/script_update.go b/ent/script_update.go index c3ed4fe8..a54a468c 100755 --- a/ent/script_update.go +++ b/ent/script_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -9,6 +9,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/environment" "github.com/gen0cide/laforge/ent/finding" @@ -31,9 +32,17 @@ func (su *ScriptUpdate) Where(ps ...predicate.Script) *ScriptUpdate { return su } -// SetHclID sets the "hcl_id" field. -func (su *ScriptUpdate) SetHclID(s string) *ScriptUpdate { - su.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (su *ScriptUpdate) SetHCLID(s string) *ScriptUpdate { + su.mutation.SetHCLID(s) + return su +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableHCLID(s *string) *ScriptUpdate { + if s != nil { + su.SetHCLID(*s) + } return su } @@ -43,30 +52,70 @@ func (su *ScriptUpdate) SetName(s string) *ScriptUpdate { return su } +// SetNillableName sets the "name" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableName(s *string) *ScriptUpdate { + if s != nil { + su.SetName(*s) + } + return su +} + // SetLanguage sets the "language" field. func (su *ScriptUpdate) SetLanguage(s string) *ScriptUpdate { su.mutation.SetLanguage(s) return su } +// SetNillableLanguage sets the "language" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableLanguage(s *string) *ScriptUpdate { + if s != nil { + su.SetLanguage(*s) + } + return su +} + // SetDescription sets the "description" field. func (su *ScriptUpdate) SetDescription(s string) *ScriptUpdate { su.mutation.SetDescription(s) return su } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableDescription(s *string) *ScriptUpdate { + if s != nil { + su.SetDescription(*s) + } + return su +} + // SetSource sets the "source" field. func (su *ScriptUpdate) SetSource(s string) *ScriptUpdate { su.mutation.SetSource(s) return su } +// SetNillableSource sets the "source" field if the given value is not nil. 
+func (su *ScriptUpdate) SetNillableSource(s *string) *ScriptUpdate { + if s != nil { + su.SetSource(*s) + } + return su +} + // SetSourceType sets the "source_type" field. func (su *ScriptUpdate) SetSourceType(s string) *ScriptUpdate { su.mutation.SetSourceType(s) return su } +// SetNillableSourceType sets the "source_type" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableSourceType(s *string) *ScriptUpdate { + if s != nil { + su.SetSourceType(*s) + } + return su +} + // SetCooldown sets the "cooldown" field. func (su *ScriptUpdate) SetCooldown(i int) *ScriptUpdate { su.mutation.ResetCooldown() @@ -74,6 +123,14 @@ func (su *ScriptUpdate) SetCooldown(i int) *ScriptUpdate { return su } +// SetNillableCooldown sets the "cooldown" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableCooldown(i *int) *ScriptUpdate { + if i != nil { + su.SetCooldown(*i) + } + return su +} + // AddCooldown adds i to the "cooldown" field. func (su *ScriptUpdate) AddCooldown(i int) *ScriptUpdate { su.mutation.AddCooldown(i) @@ -87,6 +144,14 @@ func (su *ScriptUpdate) SetTimeout(i int) *ScriptUpdate { return su } +// SetNillableTimeout sets the "timeout" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableTimeout(i *int) *ScriptUpdate { + if i != nil { + su.SetTimeout(*i) + } + return su +} + // AddTimeout adds i to the "timeout" field. func (su *ScriptUpdate) AddTimeout(i int) *ScriptUpdate { su.mutation.AddTimeout(i) @@ -99,18 +164,40 @@ func (su *ScriptUpdate) SetIgnoreErrors(b bool) *ScriptUpdate { return su } +// SetNillableIgnoreErrors sets the "ignore_errors" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableIgnoreErrors(b *bool) *ScriptUpdate { + if b != nil { + su.SetIgnoreErrors(*b) + } + return su +} + // SetArgs sets the "args" field. func (su *ScriptUpdate) SetArgs(s []string) *ScriptUpdate { su.mutation.SetArgs(s) return su } +// AppendArgs appends s to the "args" field. +func (su *ScriptUpdate) AppendArgs(s []string) *ScriptUpdate { + su.mutation.AppendArgs(s) + return su +} + // SetDisabled sets the "disabled" field. func (su *ScriptUpdate) SetDisabled(b bool) *ScriptUpdate { su.mutation.SetDisabled(b) return su } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableDisabled(b *bool) *ScriptUpdate { + if b != nil { + su.SetDisabled(*b) + } + return su +} + // SetVars sets the "vars" field. func (su *ScriptUpdate) SetVars(m map[string]string) *ScriptUpdate { su.mutation.SetVars(m) @@ -123,6 +210,14 @@ func (su *ScriptUpdate) SetAbsPath(s string) *ScriptUpdate { return su } +// SetNillableAbsPath sets the "abs_path" field if the given value is not nil. +func (su *ScriptUpdate) SetNillableAbsPath(s *string) *ScriptUpdate { + if s != nil { + su.SetAbsPath(*s) + } + return su +} + // SetTags sets the "tags" field. func (su *ScriptUpdate) SetTags(m map[string]string) *ScriptUpdate { su.mutation.SetTags(m) @@ -233,34 +328,7 @@ func (su *ScriptUpdate) ClearScriptToEnvironment() *ScriptUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (su *ScriptUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(su.hooks) == 0 { - affected, err = su.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ScriptMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - su.mutation = mutation - affected, err = su.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(su.hooks) - 1; i >= 0; i-- { - if su.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = su.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, su.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, su.sqlSave, su.mutation, su.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -286,16 +354,7 @@ func (su *ScriptUpdate) ExecX(ctx context.Context) { } func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: script.Table, - Columns: script.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(script.Table, script.Columns, sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID)) if ps := su.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -303,117 +362,58 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := su.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldHclID, - }) + if value, ok := su.mutation.HCLID(); ok { + _spec.SetField(script.FieldHCLID, field.TypeString, value) } if value, ok := su.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldName, - }) + _spec.SetField(script.FieldName, field.TypeString, value) } if value, ok := su.mutation.Language(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldLanguage, - }) + _spec.SetField(script.FieldLanguage, field.TypeString, value) } if value, ok := su.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldDescription, - }) + _spec.SetField(script.FieldDescription, field.TypeString, value) } if value, ok := su.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldSource, - }) + _spec.SetField(script.FieldSource, field.TypeString, value) } if value, ok := su.mutation.SourceType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldSourceType, - }) + _spec.SetField(script.FieldSourceType, field.TypeString, value) } if value, ok := su.mutation.Cooldown(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldCooldown, - }) + _spec.SetField(script.FieldCooldown, field.TypeInt, value) } if value, ok := su.mutation.AddedCooldown(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: 
value, - Column: script.FieldCooldown, - }) + _spec.AddField(script.FieldCooldown, field.TypeInt, value) } if value, ok := su.mutation.Timeout(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldTimeout, - }) + _spec.SetField(script.FieldTimeout, field.TypeInt, value) } if value, ok := su.mutation.AddedTimeout(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldTimeout, - }) + _spec.AddField(script.FieldTimeout, field.TypeInt, value) } if value, ok := su.mutation.IgnoreErrors(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: script.FieldIgnoreErrors, - }) + _spec.SetField(script.FieldIgnoreErrors, field.TypeBool, value) } if value, ok := su.mutation.Args(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldArgs, + _spec.SetField(script.FieldArgs, field.TypeJSON, value) + } + if value, ok := su.mutation.AppendedArgs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, script.FieldArgs, value) }) } if value, ok := su.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: script.FieldDisabled, - }) + _spec.SetField(script.FieldDisabled, field.TypeBool, value) } if value, ok := su.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldVars, - }) + _spec.SetField(script.FieldVars, field.TypeJSON, value) } if value, ok := su.mutation.AbsPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldAbsPath, - }) + _spec.SetField(script.FieldAbsPath, field.TypeString, value) } if value, ok := su.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldTags, - }) + _spec.SetField(script.FieldTags, field.TypeJSON, value) } if su.mutation.ScriptToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -423,10 +423,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -439,10 +436,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -458,10 +452,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -477,10 +468,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: 
[]string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -493,10 +481,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -512,10 +497,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -531,10 +513,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -547,10 +526,7 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{script.ScriptToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -562,10 +538,11 @@ func (su *ScriptUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{script.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + su.mutation.done = true return n, nil } @@ -577,9 +554,17 @@ type ScriptUpdateOne struct { mutation *ScriptMutation } -// SetHclID sets the "hcl_id" field. -func (suo *ScriptUpdateOne) SetHclID(s string) *ScriptUpdateOne { - suo.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (suo *ScriptUpdateOne) SetHCLID(s string) *ScriptUpdateOne { + suo.mutation.SetHCLID(s) + return suo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableHCLID(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetHCLID(*s) + } return suo } @@ -589,30 +574,70 @@ func (suo *ScriptUpdateOne) SetName(s string) *ScriptUpdateOne { return suo } +// SetNillableName sets the "name" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableName(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetName(*s) + } + return suo +} + // SetLanguage sets the "language" field. func (suo *ScriptUpdateOne) SetLanguage(s string) *ScriptUpdateOne { suo.mutation.SetLanguage(s) return suo } +// SetNillableLanguage sets the "language" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableLanguage(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetLanguage(*s) + } + return suo +} + // SetDescription sets the "description" field. 
func (suo *ScriptUpdateOne) SetDescription(s string) *ScriptUpdateOne { suo.mutation.SetDescription(s) return suo } +// SetNillableDescription sets the "description" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableDescription(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetDescription(*s) + } + return suo +} + // SetSource sets the "source" field. func (suo *ScriptUpdateOne) SetSource(s string) *ScriptUpdateOne { suo.mutation.SetSource(s) return suo } +// SetNillableSource sets the "source" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableSource(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetSource(*s) + } + return suo +} + // SetSourceType sets the "source_type" field. func (suo *ScriptUpdateOne) SetSourceType(s string) *ScriptUpdateOne { suo.mutation.SetSourceType(s) return suo } +// SetNillableSourceType sets the "source_type" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableSourceType(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetSourceType(*s) + } + return suo +} + // SetCooldown sets the "cooldown" field. func (suo *ScriptUpdateOne) SetCooldown(i int) *ScriptUpdateOne { suo.mutation.ResetCooldown() @@ -620,6 +645,14 @@ func (suo *ScriptUpdateOne) SetCooldown(i int) *ScriptUpdateOne { return suo } +// SetNillableCooldown sets the "cooldown" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableCooldown(i *int) *ScriptUpdateOne { + if i != nil { + suo.SetCooldown(*i) + } + return suo +} + // AddCooldown adds i to the "cooldown" field. func (suo *ScriptUpdateOne) AddCooldown(i int) *ScriptUpdateOne { suo.mutation.AddCooldown(i) @@ -633,6 +666,14 @@ func (suo *ScriptUpdateOne) SetTimeout(i int) *ScriptUpdateOne { return suo } +// SetNillableTimeout sets the "timeout" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableTimeout(i *int) *ScriptUpdateOne { + if i != nil { + suo.SetTimeout(*i) + } + return suo +} + // AddTimeout adds i to the "timeout" field. func (suo *ScriptUpdateOne) AddTimeout(i int) *ScriptUpdateOne { suo.mutation.AddTimeout(i) @@ -645,18 +686,40 @@ func (suo *ScriptUpdateOne) SetIgnoreErrors(b bool) *ScriptUpdateOne { return suo } +// SetNillableIgnoreErrors sets the "ignore_errors" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableIgnoreErrors(b *bool) *ScriptUpdateOne { + if b != nil { + suo.SetIgnoreErrors(*b) + } + return suo +} + // SetArgs sets the "args" field. func (suo *ScriptUpdateOne) SetArgs(s []string) *ScriptUpdateOne { suo.mutation.SetArgs(s) return suo } +// AppendArgs appends s to the "args" field. +func (suo *ScriptUpdateOne) AppendArgs(s []string) *ScriptUpdateOne { + suo.mutation.AppendArgs(s) + return suo +} + // SetDisabled sets the "disabled" field. func (suo *ScriptUpdateOne) SetDisabled(b bool) *ScriptUpdateOne { suo.mutation.SetDisabled(b) return suo } +// SetNillableDisabled sets the "disabled" field if the given value is not nil. +func (suo *ScriptUpdateOne) SetNillableDisabled(b *bool) *ScriptUpdateOne { + if b != nil { + suo.SetDisabled(*b) + } + return suo +} + // SetVars sets the "vars" field. func (suo *ScriptUpdateOne) SetVars(m map[string]string) *ScriptUpdateOne { suo.mutation.SetVars(m) @@ -669,6 +732,14 @@ func (suo *ScriptUpdateOne) SetAbsPath(s string) *ScriptUpdateOne { return suo } +// SetNillableAbsPath sets the "abs_path" field if the given value is not nil. 
+func (suo *ScriptUpdateOne) SetNillableAbsPath(s *string) *ScriptUpdateOne { + if s != nil { + suo.SetAbsPath(*s) + } + return suo +} + // SetTags sets the "tags" field. func (suo *ScriptUpdateOne) SetTags(m map[string]string) *ScriptUpdateOne { suo.mutation.SetTags(m) @@ -777,6 +848,12 @@ func (suo *ScriptUpdateOne) ClearScriptToEnvironment() *ScriptUpdateOne { return suo } +// Where appends a list predicates to the ScriptUpdate builder. +func (suo *ScriptUpdateOne) Where(ps ...predicate.Script) *ScriptUpdateOne { + suo.mutation.Where(ps...) + return suo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (suo *ScriptUpdateOne) Select(field string, fields ...string) *ScriptUpdateOne { @@ -786,34 +863,7 @@ func (suo *ScriptUpdateOne) Select(field string, fields ...string) *ScriptUpdate // Save executes the query and returns the updated Script entity. func (suo *ScriptUpdateOne) Save(ctx context.Context) (*Script, error) { - var ( - err error - node *Script - ) - if len(suo.hooks) == 0 { - node, err = suo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ScriptMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - suo.mutation = mutation - node, err = suo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(suo.hooks) - 1; i >= 0; i-- { - if suo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = suo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, suo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, suo.sqlSave, suo.mutation, suo.hooks) } // SaveX is like Save, but panics if an error occurs. 
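Editorial note (not part of the diff): the script_update.go hunks above move the Script update builders onto the newer ent builder surface — optional fields gain SetNillableX setters, the JSON "args" field gains AppendArgs (backed by sqljson.Append), ScriptUpdateOne gains Where, and Save is routed through withHooks. The sketch below is a minimal, hedged illustration of how calling code might use the regenerated builder; it assumes the standard client helpers ent emits alongside these builders (client.Script, UpdateOneID, Exec) and an existing Script UUID.

package main

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/google/uuid"
)

// updateScript is an illustrative sketch (not part of this diff) of the
// regenerated ScriptUpdateOne builder: nillable setters, JSON append, Exec.
func updateScript(ctx context.Context, client *ent.Client, id uuid.UUID) error {
	desc := "re-run with verbose logging" // optional value; a nil pointer leaves the field untouched
	return client.Script.
		UpdateOneID(id).
		SetNillableDescription(&desc). // no-op when the pointer is nil
		AppendArgs([]string{"-v"}).    // appends via sqljson.Append instead of rewriting the whole JSON column
		SetTimeout(600).
		Exec(ctx)
}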
@@ -839,16 +889,7 @@ func (suo *ScriptUpdateOne) ExecX(ctx context.Context) { } func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: script.Table, - Columns: script.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: script.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(script.Table, script.Columns, sqlgraph.NewFieldSpec(script.FieldID, field.TypeUUID)) id, ok := suo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Script.id" for update`)} @@ -873,117 +914,58 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err } } } - if value, ok := suo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldHclID, - }) + if value, ok := suo.mutation.HCLID(); ok { + _spec.SetField(script.FieldHCLID, field.TypeString, value) } if value, ok := suo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldName, - }) + _spec.SetField(script.FieldName, field.TypeString, value) } if value, ok := suo.mutation.Language(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldLanguage, - }) + _spec.SetField(script.FieldLanguage, field.TypeString, value) } if value, ok := suo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldDescription, - }) + _spec.SetField(script.FieldDescription, field.TypeString, value) } if value, ok := suo.mutation.Source(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldSource, - }) + _spec.SetField(script.FieldSource, field.TypeString, value) } if value, ok := suo.mutation.SourceType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldSourceType, - }) + _spec.SetField(script.FieldSourceType, field.TypeString, value) } if value, ok := suo.mutation.Cooldown(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldCooldown, - }) + _spec.SetField(script.FieldCooldown, field.TypeInt, value) } if value, ok := suo.mutation.AddedCooldown(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldCooldown, - }) + _spec.AddField(script.FieldCooldown, field.TypeInt, value) } if value, ok := suo.mutation.Timeout(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldTimeout, - }) + _spec.SetField(script.FieldTimeout, field.TypeInt, value) } if value, ok := suo.mutation.AddedTimeout(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: script.FieldTimeout, - }) + _spec.AddField(script.FieldTimeout, field.TypeInt, value) } if value, ok := suo.mutation.IgnoreErrors(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: script.FieldIgnoreErrors, - }) + 
_spec.SetField(script.FieldIgnoreErrors, field.TypeBool, value) } if value, ok := suo.mutation.Args(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldArgs, + _spec.SetField(script.FieldArgs, field.TypeJSON, value) + } + if value, ok := suo.mutation.AppendedArgs(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, script.FieldArgs, value) }) } if value, ok := suo.mutation.Disabled(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: script.FieldDisabled, - }) + _spec.SetField(script.FieldDisabled, field.TypeBool, value) } if value, ok := suo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldVars, - }) + _spec.SetField(script.FieldVars, field.TypeJSON, value) } if value, ok := suo.mutation.AbsPath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: script.FieldAbsPath, - }) + _spec.SetField(script.FieldAbsPath, field.TypeString, value) } if value, ok := suo.mutation.Tags(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: script.FieldTags, - }) + _spec.SetField(script.FieldTags, field.TypeJSON, value) } if suo.mutation.ScriptToUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -993,10 +975,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1009,10 +988,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1028,10 +1004,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1047,10 +1020,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1063,10 +1033,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1082,10 +1049,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: 
[]string{script.ScriptToFindingColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: finding.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(finding.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1101,10 +1065,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1117,10 +1078,7 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err Columns: []string{script.ScriptToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1135,9 +1093,10 @@ func (suo *ScriptUpdateOne) sqlSave(ctx context.Context) (_node *Script, err err if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{script.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + suo.mutation.done = true return _node, nil } diff --git a/ent/servertask.go b/ent/servertask.go index 39f9ec83..40809a47 100755 --- a/ent/servertask.go +++ b/ent/servertask.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -8,6 +8,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/authuser" "github.com/gen0cide/laforge/ent/build" @@ -37,6 +38,7 @@ type ServerTask struct { // The values are being populated by the ServerTaskQuery when eager-loading is set. Edges ServerTaskEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // ServerTaskToAuthUser holds the value of the ServerTaskToAuthUser edge. HCLServerTaskToAuthUser *AuthUser `json:"ServerTaskToAuthUser,omitempty"` @@ -50,11 +52,12 @@ type ServerTask struct { HCLServerTaskToBuildCommit *BuildCommit `json:"ServerTaskToBuildCommit,omitempty"` // ServerTaskToGinFileMiddleware holds the value of the ServerTaskToGinFileMiddleware edge. HCLServerTaskToGinFileMiddleware []*GinFileMiddleware `json:"ServerTaskToGinFileMiddleware,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ server_task_server_task_to_auth_user *uuid.UUID server_task_server_task_to_environment *uuid.UUID server_task_server_task_to_build *uuid.UUID server_task_server_task_to_build_commit *uuid.UUID + selectValues sql.SelectValues } // ServerTaskEdges holds the relations/edges for other nodes in the graph. @@ -74,6 +77,10 @@ type ServerTaskEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [6]bool + // totalCount holds the count of the edges above. 
+ totalCount [6]map[string]int + + namedServerTaskToGinFileMiddleware map[string][]*GinFileMiddleware } // ServerTaskToAuthUserOrErr returns the ServerTaskToAuthUser value or an error if the edge @@ -81,8 +88,7 @@ type ServerTaskEdges struct { func (e ServerTaskEdges) ServerTaskToAuthUserOrErr() (*AuthUser, error) { if e.loadedTypes[0] { if e.ServerTaskToAuthUser == nil { - // The edge ServerTaskToAuthUser was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: authuser.Label} } return e.ServerTaskToAuthUser, nil @@ -95,8 +101,7 @@ func (e ServerTaskEdges) ServerTaskToAuthUserOrErr() (*AuthUser, error) { func (e ServerTaskEdges) ServerTaskToStatusOrErr() (*Status, error) { if e.loadedTypes[1] { if e.ServerTaskToStatus == nil { - // The edge ServerTaskToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.ServerTaskToStatus, nil @@ -109,8 +114,7 @@ func (e ServerTaskEdges) ServerTaskToStatusOrErr() (*Status, error) { func (e ServerTaskEdges) ServerTaskToEnvironmentOrErr() (*Environment, error) { if e.loadedTypes[2] { if e.ServerTaskToEnvironment == nil { - // The edge ServerTaskToEnvironment was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: environment.Label} } return e.ServerTaskToEnvironment, nil @@ -123,8 +127,7 @@ func (e ServerTaskEdges) ServerTaskToEnvironmentOrErr() (*Environment, error) { func (e ServerTaskEdges) ServerTaskToBuildOrErr() (*Build, error) { if e.loadedTypes[3] { if e.ServerTaskToBuild == nil { - // The edge ServerTaskToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.ServerTaskToBuild, nil @@ -137,8 +140,7 @@ func (e ServerTaskEdges) ServerTaskToBuildOrErr() (*Build, error) { func (e ServerTaskEdges) ServerTaskToBuildCommitOrErr() (*BuildCommit, error) { if e.loadedTypes[4] { if e.ServerTaskToBuildCommit == nil { - // The edge ServerTaskToBuildCommit was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: buildcommit.Label} } return e.ServerTaskToBuildCommit, nil @@ -156,8 +158,8 @@ func (e ServerTaskEdges) ServerTaskToGinFileMiddlewareOrErr() ([]*GinFileMiddlew } // scanValues returns the types for scanning values from sql.Rows. -func (*ServerTask) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*ServerTask) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case servertask.FieldErrors: @@ -177,7 +179,7 @@ func (*ServerTask) scanValues(columns []string) ([]interface{}, error) { case servertask.ForeignKeys[3]: // server_task_server_task_to_build_commit values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type ServerTask", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -185,7 +187,7 @@ func (*ServerTask) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the ServerTask fields. 
-func (st *ServerTask) assignValues(columns []string, values []interface{}) error { +func (st *ServerTask) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -257,56 +259,64 @@ func (st *ServerTask) assignValues(columns []string, values []interface{}) error st.server_task_server_task_to_build_commit = new(uuid.UUID) *st.server_task_server_task_to_build_commit = *value.S.(*uuid.UUID) } + default: + st.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the ServerTask. +// This includes values selected through modifiers, order, etc. +func (st *ServerTask) Value(name string) (ent.Value, error) { + return st.selectValues.Get(name) +} + // QueryServerTaskToAuthUser queries the "ServerTaskToAuthUser" edge of the ServerTask entity. func (st *ServerTask) QueryServerTaskToAuthUser() *AuthUserQuery { - return (&ServerTaskClient{config: st.config}).QueryServerTaskToAuthUser(st) + return NewServerTaskClient(st.config).QueryServerTaskToAuthUser(st) } // QueryServerTaskToStatus queries the "ServerTaskToStatus" edge of the ServerTask entity. func (st *ServerTask) QueryServerTaskToStatus() *StatusQuery { - return (&ServerTaskClient{config: st.config}).QueryServerTaskToStatus(st) + return NewServerTaskClient(st.config).QueryServerTaskToStatus(st) } // QueryServerTaskToEnvironment queries the "ServerTaskToEnvironment" edge of the ServerTask entity. func (st *ServerTask) QueryServerTaskToEnvironment() *EnvironmentQuery { - return (&ServerTaskClient{config: st.config}).QueryServerTaskToEnvironment(st) + return NewServerTaskClient(st.config).QueryServerTaskToEnvironment(st) } // QueryServerTaskToBuild queries the "ServerTaskToBuild" edge of the ServerTask entity. func (st *ServerTask) QueryServerTaskToBuild() *BuildQuery { - return (&ServerTaskClient{config: st.config}).QueryServerTaskToBuild(st) + return NewServerTaskClient(st.config).QueryServerTaskToBuild(st) } // QueryServerTaskToBuildCommit queries the "ServerTaskToBuildCommit" edge of the ServerTask entity. func (st *ServerTask) QueryServerTaskToBuildCommit() *BuildCommitQuery { - return (&ServerTaskClient{config: st.config}).QueryServerTaskToBuildCommit(st) + return NewServerTaskClient(st.config).QueryServerTaskToBuildCommit(st) } // QueryServerTaskToGinFileMiddleware queries the "ServerTaskToGinFileMiddleware" edge of the ServerTask entity. func (st *ServerTask) QueryServerTaskToGinFileMiddleware() *GinFileMiddlewareQuery { - return (&ServerTaskClient{config: st.config}).QueryServerTaskToGinFileMiddleware(st) + return NewServerTaskClient(st.config).QueryServerTaskToGinFileMiddleware(st) } // Update returns a builder for updating this ServerTask. // Note that you need to call ServerTask.Unwrap() before calling this method if this ServerTask // was returned from a transaction, and the transaction was committed or rolled back. func (st *ServerTask) Update() *ServerTaskUpdateOne { - return (&ServerTaskClient{config: st.config}).UpdateOne(st) + return NewServerTaskClient(st.config).UpdateOne(st) } // Unwrap unwraps the ServerTask entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (st *ServerTask) Unwrap() *ServerTask { - tx, ok := st.config.driver.(*txDriver) + _tx, ok := st.config.driver.(*txDriver) if !ok { panic("ent: ServerTask is not a transactional entity") } - st.config.driver = tx.drv + st.config.driver = _tx.drv return st } @@ -314,26 +324,48 @@ func (st *ServerTask) Unwrap() *ServerTask { func (st *ServerTask) String() string { var builder strings.Builder builder.WriteString("ServerTask(") - builder.WriteString(fmt.Sprintf("id=%v", st.ID)) - builder.WriteString(", type=") + builder.WriteString(fmt.Sprintf("id=%v, ", st.ID)) + builder.WriteString("type=") builder.WriteString(fmt.Sprintf("%v", st.Type)) - builder.WriteString(", start_time=") + builder.WriteString(", ") + builder.WriteString("start_time=") builder.WriteString(st.StartTime.Format(time.ANSIC)) - builder.WriteString(", end_time=") + builder.WriteString(", ") + builder.WriteString("end_time=") builder.WriteString(st.EndTime.Format(time.ANSIC)) - builder.WriteString(", errors=") + builder.WriteString(", ") + builder.WriteString("errors=") builder.WriteString(fmt.Sprintf("%v", st.Errors)) - builder.WriteString(", log_file_path=") + builder.WriteString(", ") + builder.WriteString("log_file_path=") builder.WriteString(st.LogFilePath) builder.WriteByte(')') return builder.String() } -// ServerTasks is a parsable slice of ServerTask. -type ServerTasks []*ServerTask +// NamedServerTaskToGinFileMiddleware returns the ServerTaskToGinFileMiddleware named value or an error if the edge was not +// loaded in eager-loading with this name. +func (st *ServerTask) NamedServerTaskToGinFileMiddleware(name string) ([]*GinFileMiddleware, error) { + if st.Edges.namedServerTaskToGinFileMiddleware == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := st.Edges.namedServerTaskToGinFileMiddleware[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (st ServerTasks) config(cfg config) { - for _i := range st { - st[_i].config = cfg +func (st *ServerTask) appendNamedServerTaskToGinFileMiddleware(name string, edges ...*GinFileMiddleware) { + if st.Edges.namedServerTaskToGinFileMiddleware == nil { + st.Edges.namedServerTaskToGinFileMiddleware = make(map[string][]*GinFileMiddleware) + } + if len(edges) == 0 { + st.Edges.namedServerTaskToGinFileMiddleware[name] = []*GinFileMiddleware{} + } else { + st.Edges.namedServerTaskToGinFileMiddleware[name] = append(st.Edges.namedServerTaskToGinFileMiddleware[name], edges...) } } + +// ServerTasks is a parsable slice of ServerTask. +type ServerTasks []*ServerTask diff --git a/ent/servertask/servertask.go b/ent/servertask/servertask.go index 5d76f508..60586b8c 100755 --- a/ent/servertask/servertask.go +++ b/ent/servertask/servertask.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package servertask @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -149,19 +151,138 @@ func TypeValidator(_type Type) error { } } +// OrderOption defines the ordering options for the ServerTask queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByStartTime orders the results by the start_time field. 
+func ByStartTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartTime, opts...).ToFunc() +} + +// ByEndTime orders the results by the end_time field. +func ByEndTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndTime, opts...).ToFunc() +} + +// ByLogFilePath orders the results by the log_file_path field. +func ByLogFilePath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLogFilePath, opts...).ToFunc() +} + +// ByServerTaskToAuthUserField orders the results by ServerTaskToAuthUser field. +func ByServerTaskToAuthUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newServerTaskToAuthUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByServerTaskToStatusField orders the results by ServerTaskToStatus field. +func ByServerTaskToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newServerTaskToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByServerTaskToEnvironmentField orders the results by ServerTaskToEnvironment field. +func ByServerTaskToEnvironmentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newServerTaskToEnvironmentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByServerTaskToBuildField orders the results by ServerTaskToBuild field. +func ByServerTaskToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newServerTaskToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByServerTaskToBuildCommitField orders the results by ServerTaskToBuildCommit field. +func ByServerTaskToBuildCommitField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newServerTaskToBuildCommitStep(), sql.OrderByField(field, opts...)) + } +} + +// ByServerTaskToGinFileMiddlewareCount orders the results by ServerTaskToGinFileMiddleware count. +func ByServerTaskToGinFileMiddlewareCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newServerTaskToGinFileMiddlewareStep(), opts...) + } +} + +// ByServerTaskToGinFileMiddleware orders the results by ServerTaskToGinFileMiddleware terms. +func ByServerTaskToGinFileMiddleware(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newServerTaskToGinFileMiddlewareStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newServerTaskToAuthUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ServerTaskToAuthUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToAuthUserTable, ServerTaskToAuthUserColumn), + ) +} +func newServerTaskToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ServerTaskToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, ServerTaskToStatusTable, ServerTaskToStatusColumn), + ) +} +func newServerTaskToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ServerTaskToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToEnvironmentTable, ServerTaskToEnvironmentColumn), + ) +} +func newServerTaskToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ServerTaskToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToBuildTable, ServerTaskToBuildColumn), + ) +} +func newServerTaskToBuildCommitStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ServerTaskToBuildCommitInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToBuildCommitTable, ServerTaskToBuildCommitColumn), + ) +} +func newServerTaskToGinFileMiddlewareStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ServerTaskToGinFileMiddlewareInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ServerTaskToGinFileMiddlewareTable, ServerTaskToGinFileMiddlewareColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (_type Type) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(_type.String())) +func (e Type) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (_type *Type) UnmarshalGQL(val interface{}) error { +func (e *Type) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *_type = Type(str) - if err := TypeValidator(*_type); err != nil { + *e = Type(str) + if err := TypeValidator(*e); err != nil { return fmt.Errorf("%s is not a valid Type", str) } return nil diff --git a/ent/servertask/where.go b/ent/servertask/where.go index a1251f2f..ad7a755e 100755 --- a/ent/servertask/where.go +++ b/ent/servertask/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package servertask @@ -13,473 +13,267 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. 
func IDIn(ids ...uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.ServerTask(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.ServerTask(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.ServerTask(sql.FieldLTE(FieldID, id)) } // StartTime applies equality check predicate on the "start_time" field. It's identical to StartTimeEQ. func StartTime(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldStartTime, v)) } // EndTime applies equality check predicate on the "end_time" field. It's identical to EndTimeEQ. func EndTime(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldEndTime, v)) } // LogFilePath applies equality check predicate on the "log_file_path" field. It's identical to LogFilePathEQ. func LogFilePath(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldLogFilePath, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v Type) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. 
func TypeNEQ(v Type) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.ServerTask(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...Type) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.ServerTask(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...Type) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.ServerTask(sql.FieldNotIn(FieldType, vs...)) } // StartTimeEQ applies the EQ predicate on the "start_time" field. func StartTimeEQ(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldStartTime, v)) } // StartTimeNEQ applies the NEQ predicate on the "start_time" field. func StartTimeNEQ(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldNEQ(FieldStartTime, v)) } // StartTimeIn applies the In predicate on the "start_time" field. func StartTimeIn(vs ...time.Time) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldStartTime), v...)) - }) + return predicate.ServerTask(sql.FieldIn(FieldStartTime, vs...)) } // StartTimeNotIn applies the NotIn predicate on the "start_time" field. func StartTimeNotIn(vs ...time.Time) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldStartTime), v...)) - }) + return predicate.ServerTask(sql.FieldNotIn(FieldStartTime, vs...)) } // StartTimeGT applies the GT predicate on the "start_time" field. func StartTimeGT(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldGT(FieldStartTime, v)) } // StartTimeGTE applies the GTE predicate on the "start_time" field. 
func StartTimeGTE(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldGTE(FieldStartTime, v)) } // StartTimeLT applies the LT predicate on the "start_time" field. func StartTimeLT(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldLT(FieldStartTime, v)) } // StartTimeLTE applies the LTE predicate on the "start_time" field. func StartTimeLTE(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartTime), v)) - }) + return predicate.ServerTask(sql.FieldLTE(FieldStartTime, v)) } // StartTimeIsNil applies the IsNil predicate on the "start_time" field. func StartTimeIsNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartTime))) - }) + return predicate.ServerTask(sql.FieldIsNull(FieldStartTime)) } // StartTimeNotNil applies the NotNil predicate on the "start_time" field. func StartTimeNotNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartTime))) - }) + return predicate.ServerTask(sql.FieldNotNull(FieldStartTime)) } // EndTimeEQ applies the EQ predicate on the "end_time" field. func EndTimeEQ(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldEndTime, v)) } // EndTimeNEQ applies the NEQ predicate on the "end_time" field. func EndTimeNEQ(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldNEQ(FieldEndTime, v)) } // EndTimeIn applies the In predicate on the "end_time" field. func EndTimeIn(vs ...time.Time) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEndTime), v...)) - }) + return predicate.ServerTask(sql.FieldIn(FieldEndTime, vs...)) } // EndTimeNotIn applies the NotIn predicate on the "end_time" field. func EndTimeNotIn(vs ...time.Time) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEndTime), v...)) - }) + return predicate.ServerTask(sql.FieldNotIn(FieldEndTime, vs...)) } // EndTimeGT applies the GT predicate on the "end_time" field. func EndTimeGT(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldGT(FieldEndTime, v)) } // EndTimeGTE applies the GTE predicate on the "end_time" field. 
func EndTimeGTE(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldGTE(FieldEndTime, v)) } // EndTimeLT applies the LT predicate on the "end_time" field. func EndTimeLT(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldLT(FieldEndTime, v)) } // EndTimeLTE applies the LTE predicate on the "end_time" field. func EndTimeLTE(v time.Time) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEndTime), v)) - }) + return predicate.ServerTask(sql.FieldLTE(FieldEndTime, v)) } // EndTimeIsNil applies the IsNil predicate on the "end_time" field. func EndTimeIsNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEndTime))) - }) + return predicate.ServerTask(sql.FieldIsNull(FieldEndTime)) } // EndTimeNotNil applies the NotNil predicate on the "end_time" field. func EndTimeNotNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEndTime))) - }) + return predicate.ServerTask(sql.FieldNotNull(FieldEndTime)) } // ErrorsIsNil applies the IsNil predicate on the "errors" field. func ErrorsIsNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldErrors))) - }) + return predicate.ServerTask(sql.FieldIsNull(FieldErrors)) } // ErrorsNotNil applies the NotNil predicate on the "errors" field. func ErrorsNotNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldErrors))) - }) + return predicate.ServerTask(sql.FieldNotNull(FieldErrors)) } // LogFilePathEQ applies the EQ predicate on the "log_file_path" field. func LogFilePathEQ(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldEQ(FieldLogFilePath, v)) } // LogFilePathNEQ applies the NEQ predicate on the "log_file_path" field. func LogFilePathNEQ(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldNEQ(FieldLogFilePath, v)) } // LogFilePathIn applies the In predicate on the "log_file_path" field. func LogFilePathIn(vs ...string) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldLogFilePath), v...)) - }) + return predicate.ServerTask(sql.FieldIn(FieldLogFilePath, vs...)) } // LogFilePathNotIn applies the NotIn predicate on the "log_file_path" field. func LogFilePathNotIn(vs ...string) predicate.ServerTask { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ServerTask(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldLogFilePath), v...)) - }) + return predicate.ServerTask(sql.FieldNotIn(FieldLogFilePath, vs...)) } // LogFilePathGT applies the GT predicate on the "log_file_path" field. func LogFilePathGT(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldGT(FieldLogFilePath, v)) } // LogFilePathGTE applies the GTE predicate on the "log_file_path" field. func LogFilePathGTE(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldGTE(FieldLogFilePath, v)) } // LogFilePathLT applies the LT predicate on the "log_file_path" field. func LogFilePathLT(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldLT(FieldLogFilePath, v)) } // LogFilePathLTE applies the LTE predicate on the "log_file_path" field. func LogFilePathLTE(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldLTE(FieldLogFilePath, v)) } // LogFilePathContains applies the Contains predicate on the "log_file_path" field. func LogFilePathContains(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldContains(FieldLogFilePath, v)) } // LogFilePathHasPrefix applies the HasPrefix predicate on the "log_file_path" field. func LogFilePathHasPrefix(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldHasPrefix(FieldLogFilePath, v)) } // LogFilePathHasSuffix applies the HasSuffix predicate on the "log_file_path" field. func LogFilePathHasSuffix(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldHasSuffix(FieldLogFilePath, v)) } // LogFilePathIsNil applies the IsNil predicate on the "log_file_path" field. func LogFilePathIsNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLogFilePath))) - }) + return predicate.ServerTask(sql.FieldIsNull(FieldLogFilePath)) } // LogFilePathNotNil applies the NotNil predicate on the "log_file_path" field. func LogFilePathNotNil() predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLogFilePath))) - }) + return predicate.ServerTask(sql.FieldNotNull(FieldLogFilePath)) } // LogFilePathEqualFold applies the EqualFold predicate on the "log_file_path" field. func LogFilePathEqualFold(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldEqualFold(FieldLogFilePath, v)) } // LogFilePathContainsFold applies the ContainsFold predicate on the "log_file_path" field. 
func LogFilePathContainsFold(v string) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldLogFilePath), v)) - }) + return predicate.ServerTask(sql.FieldContainsFold(FieldLogFilePath, v)) } // HasServerTaskToAuthUser applies the HasEdge predicate on the "ServerTaskToAuthUser" edge. @@ -487,7 +281,6 @@ func HasServerTaskToAuthUser() predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToAuthUserTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToAuthUserTable, ServerTaskToAuthUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -497,11 +290,7 @@ func HasServerTaskToAuthUser() predicate.ServerTask { // HasServerTaskToAuthUserWith applies the HasEdge predicate on the "ServerTaskToAuthUser" edge with a given conditions (other predicates). func HasServerTaskToAuthUserWith(preds ...predicate.AuthUser) predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToAuthUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToAuthUserTable, ServerTaskToAuthUserColumn), - ) + step := newServerTaskToAuthUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -515,7 +304,6 @@ func HasServerTaskToStatus() predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, ServerTaskToStatusTable, ServerTaskToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -525,11 +313,7 @@ func HasServerTaskToStatus() predicate.ServerTask { // HasServerTaskToStatusWith applies the HasEdge predicate on the "ServerTaskToStatus" edge with a given conditions (other predicates). func HasServerTaskToStatusWith(preds ...predicate.Status) predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, ServerTaskToStatusTable, ServerTaskToStatusColumn), - ) + step := newServerTaskToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -543,7 +327,6 @@ func HasServerTaskToEnvironment() predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToEnvironmentTable, ServerTaskToEnvironmentColumn), ) sqlgraph.HasNeighbors(s, step) @@ -553,11 +336,7 @@ func HasServerTaskToEnvironment() predicate.ServerTask { // HasServerTaskToEnvironmentWith applies the HasEdge predicate on the "ServerTaskToEnvironment" edge with a given conditions (other predicates). 
func HasServerTaskToEnvironmentWith(preds ...predicate.Environment) predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToEnvironmentTable, ServerTaskToEnvironmentColumn), - ) + step := newServerTaskToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -571,7 +350,6 @@ func HasServerTaskToBuild() predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToBuildTable, ServerTaskToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -581,11 +359,7 @@ func HasServerTaskToBuild() predicate.ServerTask { // HasServerTaskToBuildWith applies the HasEdge predicate on the "ServerTaskToBuild" edge with a given conditions (other predicates). func HasServerTaskToBuildWith(preds ...predicate.Build) predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToBuildTable, ServerTaskToBuildColumn), - ) + step := newServerTaskToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -599,7 +373,6 @@ func HasServerTaskToBuildCommit() predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToBuildCommitTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToBuildCommitTable, ServerTaskToBuildCommitColumn), ) sqlgraph.HasNeighbors(s, step) @@ -609,11 +382,7 @@ func HasServerTaskToBuildCommit() predicate.ServerTask { // HasServerTaskToBuildCommitWith applies the HasEdge predicate on the "ServerTaskToBuildCommit" edge with a given conditions (other predicates). func HasServerTaskToBuildCommitWith(preds ...predicate.BuildCommit) predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToBuildCommitInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, ServerTaskToBuildCommitTable, ServerTaskToBuildCommitColumn), - ) + step := newServerTaskToBuildCommitStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -627,7 +396,6 @@ func HasServerTaskToGinFileMiddleware() predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToGinFileMiddlewareTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, ServerTaskToGinFileMiddlewareTable, ServerTaskToGinFileMiddlewareColumn), ) sqlgraph.HasNeighbors(s, step) @@ -637,11 +405,7 @@ func HasServerTaskToGinFileMiddleware() predicate.ServerTask { // HasServerTaskToGinFileMiddlewareWith applies the HasEdge predicate on the "ServerTaskToGinFileMiddleware" edge with a given conditions (other predicates). 
func HasServerTaskToGinFileMiddlewareWith(preds ...predicate.GinFileMiddleware) predicate.ServerTask { return predicate.ServerTask(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ServerTaskToGinFileMiddlewareInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ServerTaskToGinFileMiddlewareTable, ServerTaskToGinFileMiddlewareColumn), - ) + step := newServerTaskToGinFileMiddlewareStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -652,32 +416,15 @@ func HasServerTaskToGinFileMiddlewareWith(preds ...predicate.GinFileMiddleware) // And groups predicates with the AND operator between them. func And(predicates ...predicate.ServerTask) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ServerTask(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.ServerTask) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ServerTask(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.ServerTask) predicate.ServerTask { - return predicate.ServerTask(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ServerTask(sql.NotPredicates(p)) } diff --git a/ent/servertask_create.go b/ent/servertask_create.go index b9d773e8..ecb9677b 100755 --- a/ent/servertask_create.go +++ b/ent/servertask_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -196,44 +196,8 @@ func (stc *ServerTaskCreate) Mutation() *ServerTaskMutation { // Save creates the ServerTask in the database. func (stc *ServerTaskCreate) Save(ctx context.Context) (*ServerTask, error) { - var ( - err error - node *ServerTask - ) stc.defaults() - if len(stc.hooks) == 0 { - if err = stc.check(); err != nil { - return nil, err - } - node, err = stc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ServerTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = stc.check(); err != nil { - return nil, err - } - stc.mutation = mutation - if node, err = stc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(stc.hooks) - 1; i >= 0; i-- { - if stc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = stc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, stc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, stc.sqlSave, stc.mutation, stc.hooks) } // SaveX calls Save and panics if Save returns an error. 
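A minimal usage sketch (not part of the generated diff): the changes above — closure predicates replaced by `sql.FieldEQ`-style helpers in where.go, edge steps factored into `newServerTaskTo...Step` helpers, and `Save` delegating to `withHooks` — are internal to the generated code, so call sites keep the same API. The snippet below assumes the laforge ent client has already been opened elsewhere (e.g. via `ent.Open`) and that `since` is an arbitrary cutoff time; `recentServerTasks` is a hypothetical helper, not something from this repository.

// Illustrative only; not part of the diff.
package example

import (
	"context"
	"time"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/servertask"
)

// recentServerTasks returns server tasks that started after `since`,
// eager-loading their status and build edges.
func recentServerTasks(ctx context.Context, client *ent.Client, since time.Time) ([]*ent.ServerTask, error) {
	return client.ServerTask.Query().
		Where(
			servertask.StartTimeGTE(since),  // now sql.FieldGTE under the hood (see where.go above)
			servertask.LogFilePathNotNil(),  // now sql.FieldNotNull under the hood
		).
		WithServerTaskToStatus(). // eager-load the O2O status edge
		WithServerTaskToBuild().  // eager-load the M2O build edge
		All(ctx)
}

Because only the bodies of the generated predicates and builders changed, queries and mutations written against the previous generated code continue to compile unchanged after regeneration.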
@@ -286,10 +250,13 @@ func (stc *ServerTaskCreate) check() error { } func (stc *ServerTaskCreate) sqlSave(ctx context.Context) (*ServerTask, error) { + if err := stc.check(); err != nil { + return nil, err + } _node, _spec := stc.createSpec() if err := sqlgraph.CreateNode(ctx, stc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -300,62 +267,38 @@ func (stc *ServerTaskCreate) sqlSave(ctx context.Context) (*ServerTask, error) { return nil, err } } + stc.mutation.id = &_node.ID + stc.mutation.done = true return _node, nil } func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { var ( _node = &ServerTask{config: stc.config} - _spec = &sqlgraph.CreateSpec{ - Table: servertask.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(servertask.Table, sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID)) ) if id, ok := stc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := stc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: servertask.FieldType, - }) + _spec.SetField(servertask.FieldType, field.TypeEnum, value) _node.Type = value } if value, ok := stc.mutation.StartTime(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: servertask.FieldStartTime, - }) + _spec.SetField(servertask.FieldStartTime, field.TypeTime, value) _node.StartTime = value } if value, ok := stc.mutation.EndTime(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: servertask.FieldEndTime, - }) + _spec.SetField(servertask.FieldEndTime, field.TypeTime, value) _node.EndTime = value } if value, ok := stc.mutation.Errors(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: servertask.FieldErrors, - }) + _spec.SetField(servertask.FieldErrors, field.TypeJSON, value) _node.Errors = value } if value, ok := stc.mutation.LogFilePath(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: servertask.FieldLogFilePath, - }) + _spec.SetField(servertask.FieldLogFilePath, field.TypeString, value) _node.LogFilePath = value } if nodes := stc.mutation.ServerTaskToAuthUserIDs(); len(nodes) > 0 { @@ -366,10 +309,7 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { Columns: []string{servertask.ServerTaskToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -386,10 +326,7 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { Columns: []string{servertask.ServerTaskToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -405,10 +342,7 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { Columns: []string{servertask.ServerTaskToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - 
IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -425,10 +359,7 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { Columns: []string{servertask.ServerTaskToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -445,10 +376,7 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { Columns: []string{servertask.ServerTaskToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -465,10 +393,7 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -482,11 +407,15 @@ func (stc *ServerTaskCreate) createSpec() (*ServerTask, *sqlgraph.CreateSpec) { // ServerTaskCreateBulk is the builder for creating many ServerTask entities in bulk. type ServerTaskCreateBulk struct { config + err error builders []*ServerTaskCreate } // Save creates the ServerTask entities in the database. func (stcb *ServerTaskCreateBulk) Save(ctx context.Context) ([]*ServerTask, error) { + if stcb.err != nil { + return nil, stcb.err + } specs := make([]*sqlgraph.CreateSpec, len(stcb.builders)) nodes := make([]*ServerTask, len(stcb.builders)) mutators := make([]Mutator, len(stcb.builders)) @@ -503,8 +432,8 @@ func (stcb *ServerTaskCreateBulk) Save(ctx context.Context) ([]*ServerTask, erro return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, stcb.builders[i+1].mutation) } else { @@ -512,7 +441,7 @@ func (stcb *ServerTaskCreateBulk) Save(ctx context.Context) ([]*ServerTask, erro // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, stcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/servertask_delete.go b/ent/servertask_delete.go index f7a4929e..71c33c04 100755 --- a/ent/servertask_delete.go +++ b/ent/servertask_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (std *ServerTaskDelete) Where(ps ...predicate.ServerTask) *ServerTaskDelete // Exec executes the deletion query and returns how many vertices were deleted. 
func (std *ServerTaskDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(std.hooks) == 0 { - affected, err = std.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ServerTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - std.mutation = mutation - affected, err = std.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(std.hooks) - 1; i >= 0; i-- { - if std.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = std.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, std.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, std.sqlExec, std.mutation, std.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (std *ServerTaskDelete) ExecX(ctx context.Context) int { } func (std *ServerTaskDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: servertask.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(servertask.Table, sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID)) if ps := std.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (std *ServerTaskDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, std.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, std.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + std.mutation.done = true + return affected, err } // ServerTaskDeleteOne is the builder for deleting a single ServerTask entity. @@ -92,6 +61,12 @@ type ServerTaskDeleteOne struct { std *ServerTaskDelete } +// Where appends a list predicates to the ServerTaskDelete builder. +func (stdo *ServerTaskDeleteOne) Where(ps ...predicate.ServerTask) *ServerTaskDeleteOne { + stdo.std.mutation.Where(ps...) + return stdo +} + // Exec executes the deletion query. func (stdo *ServerTaskDeleteOne) Exec(ctx context.Context) error { n, err := stdo.std.Exec(ctx) @@ -107,5 +82,7 @@ func (stdo *ServerTaskDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (stdo *ServerTaskDeleteOne) ExecX(ctx context.Context) { - stdo.std.ExecX(ctx) + if err := stdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/servertask_query.go b/ent/servertask_query.go index b31b606d..afe627eb 100755 --- a/ent/servertask_query.go +++ b/ent/servertask_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -26,20 +25,20 @@ import ( // ServerTaskQuery is the builder for querying ServerTask entities. type ServerTaskQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.ServerTask - // eager-loading edges. 
- withServerTaskToAuthUser *AuthUserQuery - withServerTaskToStatus *StatusQuery - withServerTaskToEnvironment *EnvironmentQuery - withServerTaskToBuild *BuildQuery - withServerTaskToBuildCommit *BuildCommitQuery - withServerTaskToGinFileMiddleware *GinFileMiddlewareQuery - withFKs bool + ctx *QueryContext + order []servertask.OrderOption + inters []Interceptor + predicates []predicate.ServerTask + withServerTaskToAuthUser *AuthUserQuery + withServerTaskToStatus *StatusQuery + withServerTaskToEnvironment *EnvironmentQuery + withServerTaskToBuild *BuildQuery + withServerTaskToBuildCommit *BuildCommitQuery + withServerTaskToGinFileMiddleware *GinFileMiddlewareQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*ServerTask) error + withNamedServerTaskToGinFileMiddleware map[string]*GinFileMiddlewareQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -51,34 +50,34 @@ func (stq *ServerTaskQuery) Where(ps ...predicate.ServerTask) *ServerTaskQuery { return stq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (stq *ServerTaskQuery) Limit(limit int) *ServerTaskQuery { - stq.limit = &limit + stq.ctx.Limit = &limit return stq } -// Offset adds an offset step to the query. +// Offset to start from. func (stq *ServerTaskQuery) Offset(offset int) *ServerTaskQuery { - stq.offset = &offset + stq.ctx.Offset = &offset return stq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (stq *ServerTaskQuery) Unique(unique bool) *ServerTaskQuery { - stq.unique = &unique + stq.ctx.Unique = &unique return stq } -// Order adds an order step to the query. -func (stq *ServerTaskQuery) Order(o ...OrderFunc) *ServerTaskQuery { +// Order specifies how the records should be ordered. +func (stq *ServerTaskQuery) Order(o ...servertask.OrderOption) *ServerTaskQuery { stq.order = append(stq.order, o...) return stq } // QueryServerTaskToAuthUser chains the current query on the "ServerTaskToAuthUser" edge. func (stq *ServerTaskQuery) QueryServerTaskToAuthUser() *AuthUserQuery { - query := &AuthUserQuery{config: stq.config} + query := (&AuthUserClient{config: stq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := stq.prepareQuery(ctx); err != nil { return nil, err @@ -100,7 +99,7 @@ func (stq *ServerTaskQuery) QueryServerTaskToAuthUser() *AuthUserQuery { // QueryServerTaskToStatus chains the current query on the "ServerTaskToStatus" edge. func (stq *ServerTaskQuery) QueryServerTaskToStatus() *StatusQuery { - query := &StatusQuery{config: stq.config} + query := (&StatusClient{config: stq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := stq.prepareQuery(ctx); err != nil { return nil, err @@ -122,7 +121,7 @@ func (stq *ServerTaskQuery) QueryServerTaskToStatus() *StatusQuery { // QueryServerTaskToEnvironment chains the current query on the "ServerTaskToEnvironment" edge. 
func (stq *ServerTaskQuery) QueryServerTaskToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: stq.config} + query := (&EnvironmentClient{config: stq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := stq.prepareQuery(ctx); err != nil { return nil, err @@ -144,7 +143,7 @@ func (stq *ServerTaskQuery) QueryServerTaskToEnvironment() *EnvironmentQuery { // QueryServerTaskToBuild chains the current query on the "ServerTaskToBuild" edge. func (stq *ServerTaskQuery) QueryServerTaskToBuild() *BuildQuery { - query := &BuildQuery{config: stq.config} + query := (&BuildClient{config: stq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := stq.prepareQuery(ctx); err != nil { return nil, err @@ -166,7 +165,7 @@ func (stq *ServerTaskQuery) QueryServerTaskToBuild() *BuildQuery { // QueryServerTaskToBuildCommit chains the current query on the "ServerTaskToBuildCommit" edge. func (stq *ServerTaskQuery) QueryServerTaskToBuildCommit() *BuildCommitQuery { - query := &BuildCommitQuery{config: stq.config} + query := (&BuildCommitClient{config: stq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := stq.prepareQuery(ctx); err != nil { return nil, err @@ -188,7 +187,7 @@ func (stq *ServerTaskQuery) QueryServerTaskToBuildCommit() *BuildCommitQuery { // QueryServerTaskToGinFileMiddleware chains the current query on the "ServerTaskToGinFileMiddleware" edge. func (stq *ServerTaskQuery) QueryServerTaskToGinFileMiddleware() *GinFileMiddlewareQuery { - query := &GinFileMiddlewareQuery{config: stq.config} + query := (&GinFileMiddlewareClient{config: stq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := stq.prepareQuery(ctx); err != nil { return nil, err @@ -211,7 +210,7 @@ func (stq *ServerTaskQuery) QueryServerTaskToGinFileMiddleware() *GinFileMiddlew // First returns the first ServerTask entity from the query. // Returns a *NotFoundError when no ServerTask was found. func (stq *ServerTaskQuery) First(ctx context.Context) (*ServerTask, error) { - nodes, err := stq.Limit(1).All(ctx) + nodes, err := stq.Limit(1).All(setContextOp(ctx, stq.ctx, "First")) if err != nil { return nil, err } @@ -234,7 +233,7 @@ func (stq *ServerTaskQuery) FirstX(ctx context.Context) *ServerTask { // Returns a *NotFoundError when no ServerTask ID was found. func (stq *ServerTaskQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = stq.Limit(1).IDs(ctx); err != nil { + if ids, err = stq.Limit(1).IDs(setContextOp(ctx, stq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -257,7 +256,7 @@ func (stq *ServerTaskQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one ServerTask entity is found. // Returns a *NotFoundError when no ServerTask entities are found. func (stq *ServerTaskQuery) Only(ctx context.Context) (*ServerTask, error) { - nodes, err := stq.Limit(2).All(ctx) + nodes, err := stq.Limit(2).All(setContextOp(ctx, stq.ctx, "Only")) if err != nil { return nil, err } @@ -285,7 +284,7 @@ func (stq *ServerTaskQuery) OnlyX(ctx context.Context) *ServerTask { // Returns a *NotFoundError when no entities are found. 
func (stq *ServerTaskQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = stq.Limit(2).IDs(ctx); err != nil { + if ids, err = stq.Limit(2).IDs(setContextOp(ctx, stq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -310,10 +309,12 @@ func (stq *ServerTaskQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of ServerTasks. func (stq *ServerTaskQuery) All(ctx context.Context) ([]*ServerTask, error) { + ctx = setContextOp(ctx, stq.ctx, "All") if err := stq.prepareQuery(ctx); err != nil { return nil, err } - return stq.sqlAll(ctx) + qr := querierAll[[]*ServerTask, *ServerTaskQuery]() + return withInterceptors[[]*ServerTask](ctx, stq, qr, stq.inters) } // AllX is like All, but panics if an error occurs. @@ -326,9 +327,12 @@ func (stq *ServerTaskQuery) AllX(ctx context.Context) []*ServerTask { } // IDs executes the query and returns a list of ServerTask IDs. -func (stq *ServerTaskQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := stq.Select(servertask.FieldID).Scan(ctx, &ids); err != nil { +func (stq *ServerTaskQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if stq.ctx.Unique == nil && stq.path != nil { + stq.Unique(true) + } + ctx = setContextOp(ctx, stq.ctx, "IDs") + if err = stq.Select(servertask.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -345,10 +349,11 @@ func (stq *ServerTaskQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (stq *ServerTaskQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, stq.ctx, "Count") if err := stq.prepareQuery(ctx); err != nil { return 0, err } - return stq.sqlCount(ctx) + return withInterceptors[int](ctx, stq, querierCount[*ServerTaskQuery](), stq.inters) } // CountX is like Count, but panics if an error occurs. @@ -362,10 +367,15 @@ func (stq *ServerTaskQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (stq *ServerTaskQuery) Exist(ctx context.Context) (bool, error) { - if err := stq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, stq.ctx, "Exist") + switch _, err := stq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return stq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -385,9 +395,9 @@ func (stq *ServerTaskQuery) Clone() *ServerTaskQuery { } return &ServerTaskQuery{ config: stq.config, - limit: stq.limit, - offset: stq.offset, - order: append([]OrderFunc{}, stq.order...), + ctx: stq.ctx.Clone(), + order: append([]servertask.OrderOption{}, stq.order...), + inters: append([]Interceptor{}, stq.inters...), predicates: append([]predicate.ServerTask{}, stq.predicates...), withServerTaskToAuthUser: stq.withServerTaskToAuthUser.Clone(), withServerTaskToStatus: stq.withServerTaskToStatus.Clone(), @@ -396,16 +406,15 @@ func (stq *ServerTaskQuery) Clone() *ServerTaskQuery { withServerTaskToBuildCommit: stq.withServerTaskToBuildCommit.Clone(), withServerTaskToGinFileMiddleware: stq.withServerTaskToGinFileMiddleware.Clone(), // clone intermediate query. 
- sql: stq.sql.Clone(), - path: stq.path, - unique: stq.unique, + sql: stq.sql.Clone(), + path: stq.path, } } // WithServerTaskToAuthUser tells the query-builder to eager-load the nodes that are connected to // the "ServerTaskToAuthUser" edge. The optional arguments are used to configure the query builder of the edge. func (stq *ServerTaskQuery) WithServerTaskToAuthUser(opts ...func(*AuthUserQuery)) *ServerTaskQuery { - query := &AuthUserQuery{config: stq.config} + query := (&AuthUserClient{config: stq.config}).Query() for _, opt := range opts { opt(query) } @@ -416,7 +425,7 @@ func (stq *ServerTaskQuery) WithServerTaskToAuthUser(opts ...func(*AuthUserQuery // WithServerTaskToStatus tells the query-builder to eager-load the nodes that are connected to // the "ServerTaskToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (stq *ServerTaskQuery) WithServerTaskToStatus(opts ...func(*StatusQuery)) *ServerTaskQuery { - query := &StatusQuery{config: stq.config} + query := (&StatusClient{config: stq.config}).Query() for _, opt := range opts { opt(query) } @@ -427,7 +436,7 @@ func (stq *ServerTaskQuery) WithServerTaskToStatus(opts ...func(*StatusQuery)) * // WithServerTaskToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "ServerTaskToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (stq *ServerTaskQuery) WithServerTaskToEnvironment(opts ...func(*EnvironmentQuery)) *ServerTaskQuery { - query := &EnvironmentQuery{config: stq.config} + query := (&EnvironmentClient{config: stq.config}).Query() for _, opt := range opts { opt(query) } @@ -438,7 +447,7 @@ func (stq *ServerTaskQuery) WithServerTaskToEnvironment(opts ...func(*Environmen // WithServerTaskToBuild tells the query-builder to eager-load the nodes that are connected to // the "ServerTaskToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (stq *ServerTaskQuery) WithServerTaskToBuild(opts ...func(*BuildQuery)) *ServerTaskQuery { - query := &BuildQuery{config: stq.config} + query := (&BuildClient{config: stq.config}).Query() for _, opt := range opts { opt(query) } @@ -449,7 +458,7 @@ func (stq *ServerTaskQuery) WithServerTaskToBuild(opts ...func(*BuildQuery)) *Se // WithServerTaskToBuildCommit tells the query-builder to eager-load the nodes that are connected to // the "ServerTaskToBuildCommit" edge. The optional arguments are used to configure the query builder of the edge. func (stq *ServerTaskQuery) WithServerTaskToBuildCommit(opts ...func(*BuildCommitQuery)) *ServerTaskQuery { - query := &BuildCommitQuery{config: stq.config} + query := (&BuildCommitClient{config: stq.config}).Query() for _, opt := range opts { opt(query) } @@ -460,7 +469,7 @@ func (stq *ServerTaskQuery) WithServerTaskToBuildCommit(opts ...func(*BuildCommi // WithServerTaskToGinFileMiddleware tells the query-builder to eager-load the nodes that are connected to // the "ServerTaskToGinFileMiddleware" edge. The optional arguments are used to configure the query builder of the edge. func (stq *ServerTaskQuery) WithServerTaskToGinFileMiddleware(opts ...func(*GinFileMiddlewareQuery)) *ServerTaskQuery { - query := &GinFileMiddlewareQuery{config: stq.config} + query := (&GinFileMiddlewareClient{config: stq.config}).Query() for _, opt := range opts { opt(query) } @@ -482,17 +491,13 @@ func (stq *ServerTaskQuery) WithServerTaskToGinFileMiddleware(opts ...func(*GinF // GroupBy(servertask.FieldType). 
// Aggregate(ent.Count()). // Scan(ctx, &v) -// func (stq *ServerTaskQuery) GroupBy(field string, fields ...string) *ServerTaskGroupBy { - group := &ServerTaskGroupBy{config: stq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := stq.prepareQuery(ctx); err != nil { - return nil, err - } - return stq.sqlQuery(ctx), nil - } - return group + stq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ServerTaskGroupBy{build: stq} + grbuild.flds = &stq.ctx.Fields + grbuild.label = servertask.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -507,14 +512,31 @@ func (stq *ServerTaskQuery) GroupBy(field string, fields ...string) *ServerTaskG // client.ServerTask.Query(). // Select(servertask.FieldType). // Scan(ctx, &v) -// func (stq *ServerTaskQuery) Select(fields ...string) *ServerTaskSelect { - stq.fields = append(stq.fields, fields...) - return &ServerTaskSelect{ServerTaskQuery: stq} + stq.ctx.Fields = append(stq.ctx.Fields, fields...) + sbuild := &ServerTaskSelect{ServerTaskQuery: stq} + sbuild.label = servertask.Label + sbuild.flds, sbuild.scan = &stq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ServerTaskSelect configured with the given aggregations. +func (stq *ServerTaskQuery) Aggregate(fns ...AggregateFunc) *ServerTaskSelect { + return stq.Select().Aggregate(fns...) } func (stq *ServerTaskQuery) prepareQuery(ctx context.Context) error { - for _, f := range stq.fields { + for _, inter := range stq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, stq); err != nil { + return err + } + } + } + for _, f := range stq.ctx.Fields { if !servertask.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -529,7 +551,7 @@ func (stq *ServerTaskQuery) prepareQuery(ctx context.Context) error { return nil } -func (stq *ServerTaskQuery) sqlAll(ctx context.Context) ([]*ServerTask, error) { +func (stq *ServerTaskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ServerTask, error) { var ( nodes = []*ServerTask{} withFKs = stq.withFKs @@ -549,236 +571,290 @@ func (stq *ServerTaskQuery) sqlAll(ctx context.Context) ([]*ServerTask, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, servertask.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ServerTask).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &ServerTask{config: stq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(stq.modifiers) > 0 { + _spec.Modifiers = stq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, stq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := stq.withServerTaskToAuthUser; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ServerTask) - for i := range nodes { - if nodes[i].server_task_server_task_to_auth_user == nil { - continue - } - fk := *nodes[i].server_task_server_task_to_auth_user - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := stq.loadServerTaskToAuthUser(ctx, query, nodes, nil, + func(n *ServerTask, e *AuthUser) { n.Edges.ServerTaskToAuthUser = e }); err != nil { + return nil, err } - query.Where(authuser.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := stq.withServerTaskToStatus; query != nil { + if err := stq.loadServerTaskToStatus(ctx, query, nodes, nil, + func(n *ServerTask, e *Status) { n.Edges.ServerTaskToStatus = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_auth_user" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ServerTaskToAuthUser = n - } + } + if query := stq.withServerTaskToEnvironment; query != nil { + if err := stq.loadServerTaskToEnvironment(ctx, query, nodes, nil, + func(n *ServerTask, e *Environment) { n.Edges.ServerTaskToEnvironment = e }); err != nil { + return nil, err } } - - if query := stq.withServerTaskToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ServerTask) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + if query := stq.withServerTaskToBuild; query != nil { + if err := stq.loadServerTaskToBuild(ctx, query, nodes, nil, + func(n *ServerTask, e *Build) { n.Edges.ServerTaskToBuild = e }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(servertask.ServerTaskToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := stq.withServerTaskToBuildCommit; query != nil { + if err := stq.loadServerTaskToBuildCommit(ctx, query, nodes, nil, + func(n *ServerTask, e *BuildCommit) { n.Edges.ServerTaskToBuildCommit = e }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.server_task_server_task_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "server_task_server_task_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_status" returned %v for node %v`, *fk, n.ID) - } - 
node.Edges.ServerTaskToStatus = n + } + if query := stq.withServerTaskToGinFileMiddleware; query != nil { + if err := stq.loadServerTaskToGinFileMiddleware(ctx, query, nodes, + func(n *ServerTask) { n.Edges.ServerTaskToGinFileMiddleware = []*GinFileMiddleware{} }, + func(n *ServerTask, e *GinFileMiddleware) { + n.Edges.ServerTaskToGinFileMiddleware = append(n.Edges.ServerTaskToGinFileMiddleware, e) + }); err != nil { + return nil, err } } + for name, query := range stq.withNamedServerTaskToGinFileMiddleware { + if err := stq.loadServerTaskToGinFileMiddleware(ctx, query, nodes, + func(n *ServerTask) { n.appendNamedServerTaskToGinFileMiddleware(name) }, + func(n *ServerTask, e *GinFileMiddleware) { n.appendNamedServerTaskToGinFileMiddleware(name, e) }); err != nil { + return nil, err + } + } + for i := range stq.loadTotal { + if err := stq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } + return nodes, nil +} - if query := stq.withServerTaskToEnvironment; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ServerTask) +func (stq *ServerTaskQuery) loadServerTaskToAuthUser(ctx context.Context, query *AuthUserQuery, nodes []*ServerTask, init func(*ServerTask), assign func(*ServerTask, *AuthUser)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ServerTask) + for i := range nodes { + if nodes[i].server_task_server_task_to_auth_user == nil { + continue + } + fk := *nodes[i].server_task_server_task_to_auth_user + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(authuser.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_auth_user" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].server_task_server_task_to_environment == nil { - continue - } - fk := *nodes[i].server_task_server_task_to_environment - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(environment.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (stq *ServerTaskQuery) loadServerTaskToStatus(ctx context.Context, query *StatusQuery, nodes []*ServerTask, init func(*ServerTask), assign func(*ServerTask, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ServerTask) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(servertask.ServerTaskToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.server_task_server_task_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "server_task_server_task_to_status" is nil for node %v`, n.ID) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_environment" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ServerTaskToEnvironment = n - } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "server_task_server_task_to_status" 
returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - if query := stq.withServerTaskToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ServerTask) + return nil +} +func (stq *ServerTaskQuery) loadServerTaskToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*ServerTask, init func(*ServerTask), assign func(*ServerTask, *Environment)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ServerTask) + for i := range nodes { + if nodes[i].server_task_server_task_to_environment == nil { + continue + } + fk := *nodes[i].server_task_server_task_to_environment + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(environment.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_environment" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].server_task_server_task_to_build == nil { - continue - } - fk := *nodes[i].server_task_server_task_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (stq *ServerTaskQuery) loadServerTaskToBuild(ctx context.Context, query *BuildQuery, nodes []*ServerTask, init func(*ServerTask), assign func(*ServerTask, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ServerTask) + for i := range nodes { + if nodes[i].server_task_server_task_to_build == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ServerTaskToBuild = n - } + fk := *nodes[i].server_task_server_task_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := stq.withServerTaskToBuildCommit; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*ServerTask) + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_build" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].server_task_server_task_to_build_commit == nil { - continue - } - fk := *nodes[i].server_task_server_task_to_build_commit - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(buildcommit.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (stq *ServerTaskQuery) loadServerTaskToBuildCommit(ctx context.Context, query *BuildCommitQuery, nodes []*ServerTask, init func(*ServerTask), assign func(*ServerTask, *BuildCommit)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*ServerTask) + for i := range nodes { + if nodes[i].server_task_server_task_to_build_commit == nil { + continue } - 
for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_build_commit" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.ServerTaskToBuildCommit = n - } + fk := *nodes[i].server_task_server_task_to_build_commit + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := stq.withServerTaskToGinFileMiddleware; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*ServerTask) + if len(ids) == 0 { + return nil + } + query.Where(buildcommit.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_build_commit" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.ServerTaskToGinFileMiddleware = []*GinFileMiddleware{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.GinFileMiddleware(func(s *sql.Selector) { - s.Where(sql.InValues(servertask.ServerTaskToGinFileMiddlewareColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (stq *ServerTaskQuery) loadServerTaskToGinFileMiddleware(ctx context.Context, query *GinFileMiddlewareQuery, nodes []*ServerTask, init func(*ServerTask), assign func(*ServerTask, *GinFileMiddleware)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*ServerTask) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.GinFileMiddleware(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(servertask.ServerTaskToGinFileMiddlewareColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.server_task_server_task_to_gin_file_middleware + if fk == nil { + return fmt.Errorf(`foreign-key "server_task_server_task_to_gin_file_middleware" is nil for node %v`, n.ID) } - for _, n := range neighbors { - fk := n.server_task_server_task_to_gin_file_middleware - if fk == nil { - return nil, fmt.Errorf(`foreign-key "server_task_server_task_to_gin_file_middleware" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_gin_file_middleware" returned %v for node %v`, *fk, n.ID) - } - node.Edges.ServerTaskToGinFileMiddleware = append(node.Edges.ServerTaskToGinFileMiddleware, n) + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "server_task_server_task_to_gin_file_middleware" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil } func (stq *ServerTaskQuery) sqlCount(ctx context.Context) (int, error) { _spec := stq.querySpec() - _spec.Node.Columns = stq.fields - if len(stq.fields) > 0 { - _spec.Unique = stq.unique != nil && *stq.unique + if len(stq.modifiers) > 0 { + _spec.Modifiers = stq.modifiers } - return sqlgraph.CountNodes(ctx, stq.driver, _spec) -} - -func (stq *ServerTaskQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := stq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = 
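A minimal usage sketch (not part of this changeset): the hunks above split the old monolithic eager-loading branches of sqlAll into one loadServerTaskTo<Edge> helper per edge, but the public query surface stays the same. "client" and "ctx" are assumed to come from the caller.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// listTasksWithEdges exercises the refactored loaders: each With* call registers an
// edge query, and sqlAll dispatches to the matching loadServerTaskTo* helper, which
// batches the foreign keys into a single IN query and assigns neighbors onto Edges.
func listTasksWithEdges(ctx context.Context, client *ent.Client) ([]*ent.ServerTask, error) {
	return client.ServerTask.Query().
		WithServerTaskToAuthUser().
		WithServerTaskToStatus().
		WithServerTaskToEnvironment().
		All(ctx)
}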
stq.ctx.Fields + if len(stq.ctx.Fields) > 0 { + _spec.Unique = stq.ctx.Unique != nil && *stq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, stq.driver, _spec) } func (stq *ServerTaskQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: servertask.Table, - Columns: servertask.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, - }, - From: stq.sql, - Unique: true, - } - if unique := stq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(servertask.Table, servertask.Columns, sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID)) + _spec.From = stq.sql + if unique := stq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if stq.path != nil { + _spec.Unique = true } - if fields := stq.fields; len(fields) > 0 { + if fields := stq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, servertask.FieldID) for i := range fields { @@ -794,10 +870,10 @@ func (stq *ServerTaskQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := stq.limit; limit != nil { + if limit := stq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := stq.offset; offset != nil { + if offset := stq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := stq.order; len(ps) > 0 { @@ -813,7 +889,7 @@ func (stq *ServerTaskQuery) querySpec() *sqlgraph.QuerySpec { func (stq *ServerTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(stq.driver.Dialect()) t1 := builder.Table(servertask.Table) - columns := stq.fields + columns := stq.ctx.Fields if len(columns) == 0 { columns = servertask.Columns } @@ -822,7 +898,7 @@ func (stq *ServerTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = stq.sql selector.Select(selector.Columns(columns...)...) } - if stq.unique != nil && *stq.unique { + if stq.ctx.Unique != nil && *stq.ctx.Unique { selector.Distinct() } for _, p := range stq.predicates { @@ -831,25 +907,35 @@ func (stq *ServerTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range stq.order { p(selector) } - if offset := stq.offset; offset != nil { + if offset := stq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := stq.limit; limit != nil { + if limit := stq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedServerTaskToGinFileMiddleware tells the query-builder to eager-load the nodes that are connected to the "ServerTaskToGinFileMiddleware" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (stq *ServerTaskQuery) WithNamedServerTaskToGinFileMiddleware(name string, opts ...func(*GinFileMiddlewareQuery)) *ServerTaskQuery { + query := (&GinFileMiddlewareClient{config: stq.config}).Query() + for _, opt := range opts { + opt(query) + } + if stq.withNamedServerTaskToGinFileMiddleware == nil { + stq.withNamedServerTaskToGinFileMiddleware = make(map[string]*GinFileMiddlewareQuery) + } + stq.withNamedServerTaskToGinFileMiddleware[name] = query + return stq +} + // ServerTaskGroupBy is the group-by builder for ServerTask entities. type ServerTaskGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). 
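A hedged sketch of the named-edge API added above (WithNamedServerTaskToGinFileMiddleware). Named edges let the same edge be eager-loaded several times under different names, which the GraphQL integration uses for per-field pagination. The name "recent", the limit, and the Named accessor are example assumptions; the accessor is implied by the appendNamedServerTaskToGinFileMiddleware calls in this diff.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/servertask"
	"github.com/google/uuid"
)

func recentMiddleware(ctx context.Context, client *ent.Client, id uuid.UUID) ([]*ent.GinFileMiddleware, error) {
	task, err := client.ServerTask.Query().
		Where(servertask.ID(id)).
		WithNamedServerTaskToGinFileMiddleware("recent", func(q *ent.GinFileMiddlewareQuery) {
			q.Limit(5) // edge-level option applied only to this named load
		}).
		Only(ctx)
	if err != nil {
		return nil, err
	}
	// The named result is read back through the generated Named accessor rather than Edges.
	return task.NamedServerTaskToGinFileMiddleware("recent")
}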
- sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *ServerTaskQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -858,471 +944,77 @@ func (stgb *ServerTaskGroupBy) Aggregate(fns ...AggregateFunc) *ServerTaskGroupB return stgb } -// Scan applies the group-by query and scans the result into the given value. -func (stgb *ServerTaskGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := stgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (stgb *ServerTaskGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, stgb.build.ctx, "GroupBy") + if err := stgb.build.prepareQuery(ctx); err != nil { return err } - stgb.sql = query - return stgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := stgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(stgb.fields) > 1 { - return nil, errors.New("ent: ServerTaskGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := stgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) StringsX(ctx context.Context) []string { - v, err := stgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = stgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) StringX(ctx context.Context) string { - v, err := stgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(stgb.fields) > 1 { - return nil, errors.New("ent: ServerTaskGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := stgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*ServerTaskQuery, *ServerTaskGroupBy](ctx, stgb.build, stgb, stgb.build.inters, v) } -// IntsX is like Ints, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) IntsX(ctx context.Context) []int { - v, err := stgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (stgb *ServerTaskGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = stgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) IntX(ctx context.Context) int { - v, err := stgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(stgb.fields) > 1 { - return nil, errors.New("ent: ServerTaskGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := stgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := stgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = stgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) Float64X(ctx context.Context) float64 { - v, err := stgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(stgb.fields) > 1 { - return nil, errors.New("ent: ServerTaskGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := stgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (stgb *ServerTaskGroupBy) BoolsX(ctx context.Context) []bool { - v, err := stgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (stgb *ServerTaskGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = stgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. 
-func (stgb *ServerTaskGroupBy) BoolX(ctx context.Context) bool { - v, err := stgb.Bool(ctx) - if err != nil { - panic(err) +func (stgb *ServerTaskGroupBy) sqlScan(ctx context.Context, root *ServerTaskQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(stgb.fns)) + for _, fn := range stgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (stgb *ServerTaskGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range stgb.fields { - if !servertask.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*stgb.flds)+len(stgb.fns)) + for _, f := range *stgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := stgb.sqlQuery() + selector.GroupBy(selector.Columns(*stgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := stgb.driver.Query(ctx, query, args, rows); err != nil { + if err := stgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (stgb *ServerTaskGroupBy) sqlQuery() *sql.Selector { - selector := stgb.sql.Select() - aggregation := make([]string, 0, len(stgb.fns)) - for _, fn := range stgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(stgb.fields)+len(stgb.fns)) - for _, f := range stgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(stgb.fields...)...) -} - // ServerTaskSelect is the builder for selecting fields of ServerTask entities. type ServerTaskSelect struct { *ServerTaskQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (sts *ServerTaskSelect) Aggregate(fns ...AggregateFunc) *ServerTaskSelect { + sts.fns = append(sts.fns, fns...) + return sts } // Scan applies the selector query and scans the result into the given value. -func (sts *ServerTaskSelect) Scan(ctx context.Context, v interface{}) error { +func (sts *ServerTaskSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sts.ctx, "Select") if err := sts.prepareQuery(ctx); err != nil { return err } - sts.sql = sts.ServerTaskQuery.sqlQuery(ctx) - return sts.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (sts *ServerTaskSelect) ScanX(ctx context.Context, v interface{}) { - if err := sts.Scan(ctx, v); err != nil { - panic(err) - } + return scanWithInterceptors[*ServerTaskQuery, *ServerTaskSelect](ctx, sts.ServerTaskQuery, sts, sts.inters, v) } -// Strings returns list of strings from a selector. It is only allowed when selecting one field. 
-func (sts *ServerTaskSelect) Strings(ctx context.Context) ([]string, error) { - if len(sts.fields) > 1 { - return nil, errors.New("ent: ServerTaskSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := sts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (sts *ServerTaskSelect) StringsX(ctx context.Context) []string { - v, err := sts.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (sts *ServerTaskSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = sts.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (sts *ServerTaskSelect) StringX(ctx context.Context) string { - v, err := sts.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (sts *ServerTaskSelect) Ints(ctx context.Context) ([]int, error) { - if len(sts.fields) > 1 { - return nil, errors.New("ent: ServerTaskSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := sts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (sts *ServerTaskSelect) IntsX(ctx context.Context) []int { - v, err := sts.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (sts *ServerTaskSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = sts.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (sts *ServerTaskSelect) IntX(ctx context.Context) int { - v, err := sts.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (sts *ServerTaskSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(sts.fields) > 1 { - return nil, errors.New("ent: ServerTaskSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := sts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (sts *ServerTaskSelect) Float64sX(ctx context.Context) []float64 { - v, err := sts.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
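A short sketch for the group-by/select refactor above: the per-builder typed helpers (Strings, Ints, Bools, ...) removed here are now provided by the shared selector that ServerTaskGroupBy and ServerTaskSelect embed, so existing call sites keep working, and aggregations are read through the generic Scan. The anonymous struct and its tags below are example assumptions matching the selected column names.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/servertask"
)

// countTasksByType groups server tasks by their type and counts each group.
func countTasksByType(ctx context.Context, client *ent.Client) (map[string]int, error) {
	var rows []struct {
		Type  string `json:"type"`
		Count int    `json:"count"`
	}
	err := client.ServerTask.Query().
		GroupBy(servertask.FieldType).
		Aggregate(ent.Count()).
		Scan(ctx, &rows)
	if err != nil {
		return nil, err
	}
	out := make(map[string]int, len(rows))
	for _, r := range rows {
		out[r.Type] = r.Count
	}
	return out, nil
}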
-func (sts *ServerTaskSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = sts.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (sts *ServerTaskSelect) Float64X(ctx context.Context) float64 { - v, err := sts.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (sts *ServerTaskSelect) Bools(ctx context.Context) ([]bool, error) { - if len(sts.fields) > 1 { - return nil, errors.New("ent: ServerTaskSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := sts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (sts *ServerTaskSelect) BoolsX(ctx context.Context) []bool { - v, err := sts.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (sts *ServerTaskSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = sts.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{servertask.Label} - default: - err = fmt.Errorf("ent: ServerTaskSelect.Bools returned %d results when one was expected", len(v)) +func (sts *ServerTaskSelect) sqlScan(ctx context.Context, root *ServerTaskQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(sts.fns)) + for _, fn := range sts.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (sts *ServerTaskSelect) BoolX(ctx context.Context) bool { - v, err := sts.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*sts.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (sts *ServerTaskSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := sts.sql.Query() + query, args := selector.Query() if err := sts.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/servertask_update.go b/ent/servertask_update.go index 7e23741c..8e48e152 100755 --- a/ent/servertask_update.go +++ b/ent/servertask_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -10,6 +10,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/gen0cide/laforge/ent/authuser" "github.com/gen0cide/laforge/ent/build" @@ -41,6 +42,14 @@ func (stu *ServerTaskUpdate) SetType(s servertask.Type) *ServerTaskUpdate { return stu } +// SetNillableType sets the "type" field if the given value is not nil. +func (stu *ServerTaskUpdate) SetNillableType(s *servertask.Type) *ServerTaskUpdate { + if s != nil { + stu.SetType(*s) + } + return stu +} + // SetStartTime sets the "start_time" field. 
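A small sketch of the SetNillableType setter introduced in the servertask_update.go hunks: it simplifies PATCH-style updates because a nil pointer leaves the field untouched. The taskPatch struct is an assumption for the example, not a laforge type.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/servertask"
	"github.com/google/uuid"
)

type taskPatch struct {
	Type *servertask.Type // optional; nil means "do not change"
}

func patchTask(ctx context.Context, client *ent.Client, id uuid.UUID, p taskPatch) (int, error) {
	return client.ServerTask.Update().
		Where(servertask.ID(id)).
		SetNillableType(p.Type). // no-op when p.Type is nil
		Save(ctx)
}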
func (stu *ServerTaskUpdate) SetStartTime(t time.Time) *ServerTaskUpdate { stu.mutation.SetStartTime(t) @@ -87,6 +96,12 @@ func (stu *ServerTaskUpdate) SetErrors(s []string) *ServerTaskUpdate { return stu } +// AppendErrors appends s to the "errors" field. +func (stu *ServerTaskUpdate) AppendErrors(s []string) *ServerTaskUpdate { + stu.mutation.AppendErrors(s) + return stu +} + // ClearErrors clears the value of the "errors" field. func (stu *ServerTaskUpdate) ClearErrors() *ServerTaskUpdate { stu.mutation.ClearErrors() @@ -265,40 +280,7 @@ func (stu *ServerTaskUpdate) RemoveServerTaskToGinFileMiddleware(g ...*GinFileMi // Save executes the query and returns the number of nodes affected by the update operation. func (stu *ServerTaskUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(stu.hooks) == 0 { - if err = stu.check(); err != nil { - return 0, err - } - affected, err = stu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ServerTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = stu.check(); err != nil { - return 0, err - } - stu.mutation = mutation - affected, err = stu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(stu.hooks) - 1; i >= 0; i-- { - if stu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = stu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, stu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, stu.sqlSave, stu.mutation, stu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -340,16 +322,10 @@ func (stu *ServerTaskUpdate) check() error { } func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: servertask.Table, - Columns: servertask.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, - }, + if err := stu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(servertask.Table, servertask.Columns, sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID)) if ps := stu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -358,63 +334,36 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := stu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: servertask.FieldType, - }) + _spec.SetField(servertask.FieldType, field.TypeEnum, value) } if value, ok := stu.mutation.StartTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: servertask.FieldStartTime, - }) + _spec.SetField(servertask.FieldStartTime, field.TypeTime, value) } if stu.mutation.StartTimeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: servertask.FieldStartTime, - }) + _spec.ClearField(servertask.FieldStartTime, field.TypeTime) } if value, ok := stu.mutation.EndTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: servertask.FieldEndTime, - }) + _spec.SetField(servertask.FieldEndTime, field.TypeTime, value) } if 
stu.mutation.EndTimeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: servertask.FieldEndTime, - }) + _spec.ClearField(servertask.FieldEndTime, field.TypeTime) } if value, ok := stu.mutation.Errors(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: servertask.FieldErrors, + _spec.SetField(servertask.FieldErrors, field.TypeJSON, value) + } + if value, ok := stu.mutation.AppendedErrors(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, servertask.FieldErrors, value) }) } if stu.mutation.ErrorsCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Column: servertask.FieldErrors, - }) + _spec.ClearField(servertask.FieldErrors, field.TypeJSON) } if value, ok := stu.mutation.LogFilePath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: servertask.FieldLogFilePath, - }) + _spec.SetField(servertask.FieldLogFilePath, field.TypeString, value) } if stu.mutation.LogFilePathCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: servertask.FieldLogFilePath, - }) + _spec.ClearField(servertask.FieldLogFilePath, field.TypeString) } if stu.mutation.ServerTaskToAuthUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -424,10 +373,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -440,10 +386,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -459,10 +402,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -475,10 +415,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -494,10 +431,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -510,10 +444,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx 
context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -529,10 +460,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -545,10 +473,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -564,10 +489,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -580,10 +502,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -599,10 +518,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -615,10 +531,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -634,10 +547,7 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -649,10 +559,11 @@ func (stu *ServerTaskUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{servertask.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + 
stu.mutation.done = true return n, nil } @@ -670,6 +581,14 @@ func (stuo *ServerTaskUpdateOne) SetType(s servertask.Type) *ServerTaskUpdateOne return stuo } +// SetNillableType sets the "type" field if the given value is not nil. +func (stuo *ServerTaskUpdateOne) SetNillableType(s *servertask.Type) *ServerTaskUpdateOne { + if s != nil { + stuo.SetType(*s) + } + return stuo +} + // SetStartTime sets the "start_time" field. func (stuo *ServerTaskUpdateOne) SetStartTime(t time.Time) *ServerTaskUpdateOne { stuo.mutation.SetStartTime(t) @@ -716,6 +635,12 @@ func (stuo *ServerTaskUpdateOne) SetErrors(s []string) *ServerTaskUpdateOne { return stuo } +// AppendErrors appends s to the "errors" field. +func (stuo *ServerTaskUpdateOne) AppendErrors(s []string) *ServerTaskUpdateOne { + stuo.mutation.AppendErrors(s) + return stuo +} + // ClearErrors clears the value of the "errors" field. func (stuo *ServerTaskUpdateOne) ClearErrors() *ServerTaskUpdateOne { stuo.mutation.ClearErrors() @@ -892,6 +817,12 @@ func (stuo *ServerTaskUpdateOne) RemoveServerTaskToGinFileMiddleware(g ...*GinFi return stuo.RemoveServerTaskToGinFileMiddlewareIDs(ids...) } +// Where appends a list predicates to the ServerTaskUpdate builder. +func (stuo *ServerTaskUpdateOne) Where(ps ...predicate.ServerTask) *ServerTaskUpdateOne { + stuo.mutation.Where(ps...) + return stuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (stuo *ServerTaskUpdateOne) Select(field string, fields ...string) *ServerTaskUpdateOne { @@ -901,40 +832,7 @@ func (stuo *ServerTaskUpdateOne) Select(field string, fields ...string) *ServerT // Save executes the query and returns the updated ServerTask entity. func (stuo *ServerTaskUpdateOne) Save(ctx context.Context) (*ServerTask, error) { - var ( - err error - node *ServerTask - ) - if len(stuo.hooks) == 0 { - if err = stuo.check(); err != nil { - return nil, err - } - node, err = stuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ServerTaskMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = stuo.check(); err != nil { - return nil, err - } - stuo.mutation = mutation - node, err = stuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(stuo.hooks) - 1; i >= 0; i-- { - if stuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = stuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, stuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, stuo.sqlSave, stuo.mutation, stuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
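A hedged sketch combining two additions visible above: AppendErrors, which the builder now translates into an in-database JSON append via sqljson.Append instead of rewriting the whole "errors" column, and Where on ServerTaskUpdateOne, which lets a single-row update carry extra predicates (Save returns a NotFoundError when they filter the row out). The EndTimeIsNil predicate is assumed to be generated for the optional end_time field; the message is an arbitrary example value.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/servertask"
	"github.com/google/uuid"
)

func recordTaskError(ctx context.Context, client *ent.Client, id uuid.UUID, msg string) (*ent.ServerTask, error) {
	return client.ServerTask.UpdateOneID(id).
		Where(servertask.EndTimeIsNil()). // only touch tasks that have not finished yet
		AppendErrors([]string{msg}).      // appended atomically in SQL, not read-modify-write
		Save(ctx)
}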
@@ -976,16 +874,10 @@ func (stuo *ServerTaskUpdateOne) check() error { } func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: servertask.Table, - Columns: servertask.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, - }, + if err := stuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(servertask.Table, servertask.Columns, sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID)) id, ok := stuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ServerTask.id" for update`)} @@ -1011,63 +903,36 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask } } if value, ok := stuo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: servertask.FieldType, - }) + _spec.SetField(servertask.FieldType, field.TypeEnum, value) } if value, ok := stuo.mutation.StartTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: servertask.FieldStartTime, - }) + _spec.SetField(servertask.FieldStartTime, field.TypeTime, value) } if stuo.mutation.StartTimeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: servertask.FieldStartTime, - }) + _spec.ClearField(servertask.FieldStartTime, field.TypeTime) } if value, ok := stuo.mutation.EndTime(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: servertask.FieldEndTime, - }) + _spec.SetField(servertask.FieldEndTime, field.TypeTime, value) } if stuo.mutation.EndTimeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: servertask.FieldEndTime, - }) + _spec.ClearField(servertask.FieldEndTime, field.TypeTime) } if value, ok := stuo.mutation.Errors(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: servertask.FieldErrors, + _spec.SetField(servertask.FieldErrors, field.TypeJSON, value) + } + if value, ok := stuo.mutation.AppendedErrors(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, servertask.FieldErrors, value) }) } if stuo.mutation.ErrorsCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Column: servertask.FieldErrors, - }) + _spec.ClearField(servertask.FieldErrors, field.TypeJSON) } if value, ok := stuo.mutation.LogFilePath(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: servertask.FieldLogFilePath, - }) + _spec.SetField(servertask.FieldLogFilePath, field.TypeString, value) } if stuo.mutation.LogFilePathCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: servertask.FieldLogFilePath, - }) + _spec.ClearField(servertask.FieldLogFilePath, field.TypeString) } if stuo.mutation.ServerTaskToAuthUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1077,10 +942,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - 
Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1093,10 +955,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1112,10 +971,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1128,10 +984,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1147,10 +1000,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1163,10 +1013,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToEnvironmentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1182,10 +1029,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1198,10 +1042,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1217,10 +1058,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1233,10 +1071,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: 
[]string{servertask.ServerTaskToBuildCommitColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: buildcommit.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(buildcommit.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1252,10 +1087,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1268,10 +1100,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1287,10 +1116,7 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask Columns: []string{servertask.ServerTaskToGinFileMiddlewareColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: ginfilemiddleware.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(ginfilemiddleware.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1305,9 +1131,10 @@ func (stuo *ServerTaskUpdateOne) sqlSave(ctx context.Context) (_node *ServerTask if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{servertask.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + stuo.mutation.done = true return _node, nil } diff --git a/ent/status.go b/ent/status.go index 7d5c0f5d..d3543402 100755 --- a/ent/status.go +++ b/ent/status.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/adhocplan" "github.com/gen0cide/laforge/ent/build" @@ -43,6 +44,7 @@ type Status struct { // The values are being populated by the StatusQuery when eager-loading is set. Edges StatusEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // StatusToBuild holds the value of the StatusToBuild edge. HCLStatusToBuild *Build `json:"StatusToBuild,omitempty"` @@ -60,7 +62,7 @@ type Status struct { HCLStatusToServerTask *ServerTask `json:"StatusToServerTask,omitempty"` // StatusToAdhocPlan holds the value of the StatusToAdhocPlan edge. HCLStatusToAdhocPlan *AdhocPlan `json:"StatusToAdhocPlan,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ adhoc_plan_adhoc_plan_to_status *uuid.UUID build_build_to_status *uuid.UUID plan_plan_to_status *uuid.UUID @@ -69,6 +71,7 @@ type Status struct { provisioning_step_provisioning_step_to_status *uuid.UUID server_task_server_task_to_status *uuid.UUID team_team_to_status *uuid.UUID + selectValues sql.SelectValues } // StatusEdges holds the relations/edges for other nodes in the graph. 
@@ -92,6 +95,8 @@ type StatusEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [8]bool + // totalCount holds the count of the edges above. + totalCount [8]map[string]int } // StatusToBuildOrErr returns the StatusToBuild value or an error if the edge @@ -99,8 +104,7 @@ type StatusEdges struct { func (e StatusEdges) StatusToBuildOrErr() (*Build, error) { if e.loadedTypes[0] { if e.StatusToBuild == nil { - // The edge StatusToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.StatusToBuild, nil @@ -113,8 +117,7 @@ func (e StatusEdges) StatusToBuildOrErr() (*Build, error) { func (e StatusEdges) StatusToProvisionedNetworkOrErr() (*ProvisionedNetwork, error) { if e.loadedTypes[1] { if e.StatusToProvisionedNetwork == nil { - // The edge StatusToProvisionedNetwork was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionednetwork.Label} } return e.StatusToProvisionedNetwork, nil @@ -127,8 +130,7 @@ func (e StatusEdges) StatusToProvisionedNetworkOrErr() (*ProvisionedNetwork, err func (e StatusEdges) StatusToProvisionedHostOrErr() (*ProvisionedHost, error) { if e.loadedTypes[2] { if e.StatusToProvisionedHost == nil { - // The edge StatusToProvisionedHost was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisionedhost.Label} } return e.StatusToProvisionedHost, nil @@ -141,8 +143,7 @@ func (e StatusEdges) StatusToProvisionedHostOrErr() (*ProvisionedHost, error) { func (e StatusEdges) StatusToProvisioningStepOrErr() (*ProvisioningStep, error) { if e.loadedTypes[3] { if e.StatusToProvisioningStep == nil { - // The edge StatusToProvisioningStep was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: provisioningstep.Label} } return e.StatusToProvisioningStep, nil @@ -155,8 +156,7 @@ func (e StatusEdges) StatusToProvisioningStepOrErr() (*ProvisioningStep, error) func (e StatusEdges) StatusToTeamOrErr() (*Team, error) { if e.loadedTypes[4] { if e.StatusToTeam == nil { - // The edge StatusToTeam was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: team.Label} } return e.StatusToTeam, nil @@ -169,8 +169,7 @@ func (e StatusEdges) StatusToTeamOrErr() (*Team, error) { func (e StatusEdges) StatusToPlanOrErr() (*Plan, error) { if e.loadedTypes[5] { if e.StatusToPlan == nil { - // The edge StatusToPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.StatusToPlan, nil @@ -183,8 +182,7 @@ func (e StatusEdges) StatusToPlanOrErr() (*Plan, error) { func (e StatusEdges) StatusToServerTaskOrErr() (*ServerTask, error) { if e.loadedTypes[6] { if e.StatusToServerTask == nil { - // The edge StatusToServerTask was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. 
return nil, &NotFoundError{label: servertask.Label} } return e.StatusToServerTask, nil @@ -197,8 +195,7 @@ func (e StatusEdges) StatusToServerTaskOrErr() (*ServerTask, error) { func (e StatusEdges) StatusToAdhocPlanOrErr() (*AdhocPlan, error) { if e.loadedTypes[7] { if e.StatusToAdhocPlan == nil { - // The edge StatusToAdhocPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: adhocplan.Label} } return e.StatusToAdhocPlan, nil @@ -207,8 +204,8 @@ func (e StatusEdges) StatusToAdhocPlanOrErr() (*AdhocPlan, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Status) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Status) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case status.FieldFailed, status.FieldCompleted: @@ -236,7 +233,7 @@ func (*Status) scanValues(columns []string) ([]interface{}, error) { case status.ForeignKeys[7]: // team_team_to_status values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Status", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -244,7 +241,7 @@ func (*Status) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Status fields. -func (s *Status) assignValues(columns []string, values []interface{}) error { +func (s *Status) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -354,66 +351,74 @@ func (s *Status) assignValues(columns []string, values []interface{}) error { s.team_team_to_status = new(uuid.UUID) *s.team_team_to_status = *value.S.(*uuid.UUID) } + default: + s.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Status. +// This includes values selected through modifiers, order, etc. +func (s *Status) Value(name string) (ent.Value, error) { + return s.selectValues.Get(name) +} + // QueryStatusToBuild queries the "StatusToBuild" edge of the Status entity. func (s *Status) QueryStatusToBuild() *BuildQuery { - return (&StatusClient{config: s.config}).QueryStatusToBuild(s) + return NewStatusClient(s.config).QueryStatusToBuild(s) } // QueryStatusToProvisionedNetwork queries the "StatusToProvisionedNetwork" edge of the Status entity. func (s *Status) QueryStatusToProvisionedNetwork() *ProvisionedNetworkQuery { - return (&StatusClient{config: s.config}).QueryStatusToProvisionedNetwork(s) + return NewStatusClient(s.config).QueryStatusToProvisionedNetwork(s) } // QueryStatusToProvisionedHost queries the "StatusToProvisionedHost" edge of the Status entity. func (s *Status) QueryStatusToProvisionedHost() *ProvisionedHostQuery { - return (&StatusClient{config: s.config}).QueryStatusToProvisionedHost(s) + return NewStatusClient(s.config).QueryStatusToProvisionedHost(s) } // QueryStatusToProvisioningStep queries the "StatusToProvisioningStep" edge of the Status entity. 
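A brief sketch of reading the Status edges touched above. The *OrErr accessors keep their behavior (NotLoadedError when the edge was not requested, NotFoundError when it was eager-loaded but missing); only their comments were compacted in this diff. "client" and "ctx" are assumed from the caller.

package example

import (
	"context"
	"fmt"

	"github.com/gen0cide/laforge/ent"
)

func describeStatuses(ctx context.Context, client *ent.Client) error {
	statuses, err := client.Status.Query().
		WithStatusToPlan(). // eager-load one of the edges listed above
		All(ctx)
	if err != nil {
		return err
	}
	for _, st := range statuses {
		plan, err := st.Edges.StatusToPlanOrErr()
		if ent.IsNotFound(err) {
			continue // edge was loaded but no row exists behind it
		}
		if err != nil {
			return err // includes the not-loaded case
		}
		fmt.Printf("status %s -> plan %s\n", st.ID, plan.ID)
	}
	return nil
}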
func (s *Status) QueryStatusToProvisioningStep() *ProvisioningStepQuery { - return (&StatusClient{config: s.config}).QueryStatusToProvisioningStep(s) + return NewStatusClient(s.config).QueryStatusToProvisioningStep(s) } // QueryStatusToTeam queries the "StatusToTeam" edge of the Status entity. func (s *Status) QueryStatusToTeam() *TeamQuery { - return (&StatusClient{config: s.config}).QueryStatusToTeam(s) + return NewStatusClient(s.config).QueryStatusToTeam(s) } // QueryStatusToPlan queries the "StatusToPlan" edge of the Status entity. func (s *Status) QueryStatusToPlan() *PlanQuery { - return (&StatusClient{config: s.config}).QueryStatusToPlan(s) + return NewStatusClient(s.config).QueryStatusToPlan(s) } // QueryStatusToServerTask queries the "StatusToServerTask" edge of the Status entity. func (s *Status) QueryStatusToServerTask() *ServerTaskQuery { - return (&StatusClient{config: s.config}).QueryStatusToServerTask(s) + return NewStatusClient(s.config).QueryStatusToServerTask(s) } // QueryStatusToAdhocPlan queries the "StatusToAdhocPlan" edge of the Status entity. func (s *Status) QueryStatusToAdhocPlan() *AdhocPlanQuery { - return (&StatusClient{config: s.config}).QueryStatusToAdhocPlan(s) + return NewStatusClient(s.config).QueryStatusToAdhocPlan(s) } // Update returns a builder for updating this Status. // Note that you need to call Status.Unwrap() before calling this method if this Status // was returned from a transaction, and the transaction was committed or rolled back. func (s *Status) Update() *StatusUpdateOne { - return (&StatusClient{config: s.config}).UpdateOne(s) + return NewStatusClient(s.config).UpdateOne(s) } // Unwrap unwraps the Status entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (s *Status) Unwrap() *Status { - tx, ok := s.config.driver.(*txDriver) + _tx, ok := s.config.driver.(*txDriver) if !ok { panic("ent: Status is not a transactional entity") } - s.config.driver = tx.drv + s.config.driver = _tx.drv return s } @@ -421,20 +426,26 @@ func (s *Status) Unwrap() *Status { func (s *Status) String() string { var builder strings.Builder builder.WriteString("Status(") - builder.WriteString(fmt.Sprintf("id=%v", s.ID)) - builder.WriteString(", state=") + builder.WriteString(fmt.Sprintf("id=%v, ", s.ID)) + builder.WriteString("state=") builder.WriteString(fmt.Sprintf("%v", s.State)) - builder.WriteString(", status_for=") + builder.WriteString(", ") + builder.WriteString("status_for=") builder.WriteString(fmt.Sprintf("%v", s.StatusFor)) - builder.WriteString(", started_at=") + builder.WriteString(", ") + builder.WriteString("started_at=") builder.WriteString(s.StartedAt.Format(time.ANSIC)) - builder.WriteString(", ended_at=") + builder.WriteString(", ") + builder.WriteString("ended_at=") builder.WriteString(s.EndedAt.Format(time.ANSIC)) - builder.WriteString(", failed=") + builder.WriteString(", ") + builder.WriteString("failed=") builder.WriteString(fmt.Sprintf("%v", s.Failed)) - builder.WriteString(", completed=") + builder.WriteString(", ") + builder.WriteString("completed=") builder.WriteString(fmt.Sprintf("%v", s.Completed)) - builder.WriteString(", error=") + builder.WriteString(", ") + builder.WriteString("error=") builder.WriteString(s.Error) builder.WriteByte(')') return builder.String() @@ -442,9 +453,3 @@ func (s *Status) String() string { // StatusSlice is a parsable slice of Status. 
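A minimal sketch: the Query* edge helpers rewritten above (now routed through NewStatusClient) remain the way to traverse from an already-loaded Status without eager loading, e.g. fetching the owning server task on demand.

package example

import (
	"context"

	"github.com/gen0cide/laforge/ent"
)

// taskForStatus runs a fresh query scoped to this status; Only returns a
// NotFoundError when the edge is empty.
func taskForStatus(ctx context.Context, st *ent.Status) (*ent.ServerTask, error) {
	return st.QueryStatusToServerTask().Only(ctx)
}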
type StatusSlice []*Status - -func (s StatusSlice) config(cfg config) { - for _i := range s { - s[_i].config = cfg - } -} diff --git a/ent/status/status.go b/ent/status/status.go index f5393b17..54e72f9f 100755 --- a/ent/status/status.go +++ b/ent/status/status.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package status @@ -7,6 +7,8 @@ import ( "io" "strconv" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -215,37 +217,192 @@ func StatusForValidator(sf StatusFor) error { } } +// OrderOption defines the ordering options for the Status queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByState orders the results by the state field. +func ByState(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldState, opts...).ToFunc() +} + +// ByStatusFor orders the results by the status_for field. +func ByStatusFor(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatusFor, opts...).ToFunc() +} + +// ByStartedAt orders the results by the started_at field. +func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartedAt, opts...).ToFunc() +} + +// ByEndedAt orders the results by the ended_at field. +func ByEndedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndedAt, opts...).ToFunc() +} + +// ByFailed orders the results by the failed field. +func ByFailed(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFailed, opts...).ToFunc() +} + +// ByCompleted orders the results by the completed field. +func ByCompleted(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCompleted, opts...).ToFunc() +} + +// ByError orders the results by the error field. +func ByError(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldError, opts...).ToFunc() +} + +// ByStatusToBuildField orders the results by StatusToBuild field. +func ByStatusToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToProvisionedNetworkField orders the results by StatusToProvisionedNetwork field. +func ByStatusToProvisionedNetworkField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToProvisionedNetworkStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToProvisionedHostField orders the results by StatusToProvisionedHost field. +func ByStatusToProvisionedHostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToProvisionedHostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToProvisioningStepField orders the results by StatusToProvisioningStep field. +func ByStatusToProvisioningStepField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToProvisioningStepStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToTeamField orders the results by StatusToTeam field. 
+func ByStatusToTeamField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToTeamStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToPlanField orders the results by StatusToPlan field. +func ByStatusToPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToPlanStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToServerTaskField orders the results by StatusToServerTask field. +func ByStatusToServerTaskField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToServerTaskStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStatusToAdhocPlanField orders the results by StatusToAdhocPlan field. +func ByStatusToAdhocPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStatusToAdhocPlanStep(), sql.OrderByField(field, opts...)) + } +} +func newStatusToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToBuildTable, StatusToBuildColumn), + ) +} +func newStatusToProvisionedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToProvisionedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisionedNetworkTable, StatusToProvisionedNetworkColumn), + ) +} +func newStatusToProvisionedHostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToProvisionedHostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisionedHostTable, StatusToProvisionedHostColumn), + ) +} +func newStatusToProvisioningStepStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToProvisioningStepInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisioningStepTable, StatusToProvisioningStepColumn), + ) +} +func newStatusToTeamStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToTeamInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToTeamTable, StatusToTeamColumn), + ) +} +func newStatusToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToPlanTable, StatusToPlanColumn), + ) +} +func newStatusToServerTaskStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToServerTaskInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToServerTaskTable, StatusToServerTaskColumn), + ) +} +func newStatusToAdhocPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StatusToAdhocPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, StatusToAdhocPlanTable, StatusToAdhocPlanColumn), + ) +} + // MarshalGQL implements graphql.Marshaler interface. -func (s State) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(s.String())) +func (e State) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. 
-func (s *State) UnmarshalGQL(val interface{}) error { +func (e *State) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *s = State(str) - if err := StateValidator(*s); err != nil { + *e = State(str) + if err := StateValidator(*e); err != nil { return fmt.Errorf("%s is not a valid State", str) } return nil } // MarshalGQL implements graphql.Marshaler interface. -func (sf StatusFor) MarshalGQL(w io.Writer) { - io.WriteString(w, strconv.Quote(sf.String())) +func (e StatusFor) MarshalGQL(w io.Writer) { + io.WriteString(w, strconv.Quote(e.String())) } // UnmarshalGQL implements graphql.Unmarshaler interface. -func (sf *StatusFor) UnmarshalGQL(val interface{}) error { +func (e *StatusFor) UnmarshalGQL(val interface{}) error { str, ok := val.(string) if !ok { return fmt.Errorf("enum %T must be a string", val) } - *sf = StatusFor(str) - if err := StatusForValidator(*sf); err != nil { + *e = StatusFor(str) + if err := StatusForValidator(*e); err != nil { return fmt.Errorf("%s is not a valid StatusFor", str) } return nil diff --git a/ent/status/where.go b/ent/status/where.go index 039b0a5e..4fe87b19 100755 --- a/ent/status/where.go +++ b/ent/status/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package status @@ -13,549 +13,307 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Status(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Status(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. 
func IDGTE(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Status(sql.FieldLTE(FieldID, id)) } // StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. func StartedAt(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldEQ(FieldStartedAt, v)) } // EndedAt applies equality check predicate on the "ended_at" field. It's identical to EndedAtEQ. func EndedAt(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldEQ(FieldEndedAt, v)) } // Failed applies equality check predicate on the "failed" field. It's identical to FailedEQ. func Failed(v bool) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFailed), v)) - }) + return predicate.Status(sql.FieldEQ(FieldFailed, v)) } // Completed applies equality check predicate on the "completed" field. It's identical to CompletedEQ. func Completed(v bool) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompleted), v)) - }) + return predicate.Status(sql.FieldEQ(FieldCompleted, v)) } // Error applies equality check predicate on the "error" field. It's identical to ErrorEQ. func Error(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldEQ(FieldError, v)) } // StateEQ applies the EQ predicate on the "state" field. func StateEQ(v State) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldState), v)) - }) + return predicate.Status(sql.FieldEQ(FieldState, v)) } // StateNEQ applies the NEQ predicate on the "state" field. func StateNEQ(v State) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldState), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldState, v)) } // StateIn applies the In predicate on the "state" field. func StateIn(vs ...State) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldState), v...)) - }) + return predicate.Status(sql.FieldIn(FieldState, vs...)) } // StateNotIn applies the NotIn predicate on the "state" field. func StateNotIn(vs ...State) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldState), v...)) - }) + return predicate.Status(sql.FieldNotIn(FieldState, vs...)) } // StatusForEQ applies the EQ predicate on the "status_for" field. func StatusForEQ(v StatusFor) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStatusFor), v)) - }) + return predicate.Status(sql.FieldEQ(FieldStatusFor, v)) } // StatusForNEQ applies the NEQ predicate on the "status_for" field. func StatusForNEQ(v StatusFor) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStatusFor), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldStatusFor, v)) } // StatusForIn applies the In predicate on the "status_for" field. func StatusForIn(vs ...StatusFor) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldStatusFor), v...)) - }) + return predicate.Status(sql.FieldIn(FieldStatusFor, vs...)) } // StatusForNotIn applies the NotIn predicate on the "status_for" field. func StatusForNotIn(vs ...StatusFor) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldStatusFor), v...)) - }) + return predicate.Status(sql.FieldNotIn(FieldStatusFor, vs...)) } // StartedAtEQ applies the EQ predicate on the "started_at" field. func StartedAtEQ(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldEQ(FieldStartedAt, v)) } // StartedAtNEQ applies the NEQ predicate on the "started_at" field. func StartedAtNEQ(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldStartedAt, v)) } // StartedAtIn applies the In predicate on the "started_at" field. func StartedAtIn(vs ...time.Time) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldStartedAt), v...)) - }) + return predicate.Status(sql.FieldIn(FieldStartedAt, vs...)) } // StartedAtNotIn applies the NotIn predicate on the "started_at" field. func StartedAtNotIn(vs ...time.Time) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldStartedAt), v...)) - }) + return predicate.Status(sql.FieldNotIn(FieldStartedAt, vs...)) } // StartedAtGT applies the GT predicate on the "started_at" field. func StartedAtGT(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldGT(FieldStartedAt, v)) } // StartedAtGTE applies the GTE predicate on the "started_at" field. func StartedAtGTE(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldGTE(FieldStartedAt, v)) } // StartedAtLT applies the LT predicate on the "started_at" field. func StartedAtLT(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldLT(FieldStartedAt, v)) } // StartedAtLTE applies the LTE predicate on the "started_at" field. func StartedAtLTE(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartedAt), v)) - }) + return predicate.Status(sql.FieldLTE(FieldStartedAt, v)) } // StartedAtIsNil applies the IsNil predicate on the "started_at" field. func StartedAtIsNil() predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartedAt))) - }) + return predicate.Status(sql.FieldIsNull(FieldStartedAt)) } // StartedAtNotNil applies the NotNil predicate on the "started_at" field. func StartedAtNotNil() predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartedAt))) - }) + return predicate.Status(sql.FieldNotNull(FieldStartedAt)) } // EndedAtEQ applies the EQ predicate on the "ended_at" field. func EndedAtEQ(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldEQ(FieldEndedAt, v)) } // EndedAtNEQ applies the NEQ predicate on the "ended_at" field. func EndedAtNEQ(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldEndedAt, v)) } // EndedAtIn applies the In predicate on the "ended_at" field. func EndedAtIn(vs ...time.Time) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEndedAt), v...)) - }) + return predicate.Status(sql.FieldIn(FieldEndedAt, vs...)) } // EndedAtNotIn applies the NotIn predicate on the "ended_at" field. func EndedAtNotIn(vs ...time.Time) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEndedAt), v...)) - }) + return predicate.Status(sql.FieldNotIn(FieldEndedAt, vs...)) } // EndedAtGT applies the GT predicate on the "ended_at" field. 
func EndedAtGT(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldGT(FieldEndedAt, v)) } // EndedAtGTE applies the GTE predicate on the "ended_at" field. func EndedAtGTE(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldGTE(FieldEndedAt, v)) } // EndedAtLT applies the LT predicate on the "ended_at" field. func EndedAtLT(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldLT(FieldEndedAt, v)) } // EndedAtLTE applies the LTE predicate on the "ended_at" field. func EndedAtLTE(v time.Time) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEndedAt), v)) - }) + return predicate.Status(sql.FieldLTE(FieldEndedAt, v)) } // EndedAtIsNil applies the IsNil predicate on the "ended_at" field. func EndedAtIsNil() predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEndedAt))) - }) + return predicate.Status(sql.FieldIsNull(FieldEndedAt)) } // EndedAtNotNil applies the NotNil predicate on the "ended_at" field. func EndedAtNotNil() predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEndedAt))) - }) + return predicate.Status(sql.FieldNotNull(FieldEndedAt)) } // FailedEQ applies the EQ predicate on the "failed" field. func FailedEQ(v bool) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldFailed), v)) - }) + return predicate.Status(sql.FieldEQ(FieldFailed, v)) } // FailedNEQ applies the NEQ predicate on the "failed" field. func FailedNEQ(v bool) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldFailed), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldFailed, v)) } // CompletedEQ applies the EQ predicate on the "completed" field. func CompletedEQ(v bool) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCompleted), v)) - }) + return predicate.Status(sql.FieldEQ(FieldCompleted, v)) } // CompletedNEQ applies the NEQ predicate on the "completed" field. func CompletedNEQ(v bool) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCompleted), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldCompleted, v)) } // ErrorEQ applies the EQ predicate on the "error" field. func ErrorEQ(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldEQ(FieldError, v)) } // ErrorNEQ applies the NEQ predicate on the "error" field. func ErrorNEQ(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldNEQ(FieldError, v)) } // ErrorIn applies the In predicate on the "error" field. func ErrorIn(vs ...string) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldError), v...)) - }) + return predicate.Status(sql.FieldIn(FieldError, vs...)) } // ErrorNotIn applies the NotIn predicate on the "error" field. func ErrorNotIn(vs ...string) predicate.Status { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Status(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldError), v...)) - }) + return predicate.Status(sql.FieldNotIn(FieldError, vs...)) } // ErrorGT applies the GT predicate on the "error" field. func ErrorGT(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldGT(FieldError, v)) } // ErrorGTE applies the GTE predicate on the "error" field. func ErrorGTE(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldGTE(FieldError, v)) } // ErrorLT applies the LT predicate on the "error" field. func ErrorLT(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldLT(FieldError, v)) } // ErrorLTE applies the LTE predicate on the "error" field. func ErrorLTE(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldLTE(FieldError, v)) } // ErrorContains applies the Contains predicate on the "error" field. func ErrorContains(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldContains(FieldError, v)) } // ErrorHasPrefix applies the HasPrefix predicate on the "error" field. func ErrorHasPrefix(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldHasPrefix(FieldError, v)) } // ErrorHasSuffix applies the HasSuffix predicate on the "error" field. func ErrorHasSuffix(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldHasSuffix(FieldError, v)) } // ErrorIsNil applies the IsNil predicate on the "error" field. func ErrorIsNil() predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldError))) - }) + return predicate.Status(sql.FieldIsNull(FieldError)) } // ErrorNotNil applies the NotNil predicate on the "error" field. func ErrorNotNil() predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldError))) - }) + return predicate.Status(sql.FieldNotNull(FieldError)) } // ErrorEqualFold applies the EqualFold predicate on the "error" field. func ErrorEqualFold(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldEqualFold(FieldError, v)) } // ErrorContainsFold applies the ContainsFold predicate on the "error" field. 
func ErrorContainsFold(v string) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldError), v)) - }) + return predicate.Status(sql.FieldContainsFold(FieldError, v)) } // HasStatusToBuild applies the HasEdge predicate on the "StatusToBuild" edge. @@ -563,7 +321,6 @@ func HasStatusToBuild() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToBuildTable, StatusToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -573,11 +330,7 @@ func HasStatusToBuild() predicate.Status { // HasStatusToBuildWith applies the HasEdge predicate on the "StatusToBuild" edge with a given conditions (other predicates). func HasStatusToBuildWith(preds ...predicate.Build) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToBuildTable, StatusToBuildColumn), - ) + step := newStatusToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -591,7 +344,6 @@ func HasStatusToProvisionedNetwork() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToProvisionedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisionedNetworkTable, StatusToProvisionedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -601,11 +353,7 @@ func HasStatusToProvisionedNetwork() predicate.Status { // HasStatusToProvisionedNetworkWith applies the HasEdge predicate on the "StatusToProvisionedNetwork" edge with a given conditions (other predicates). func HasStatusToProvisionedNetworkWith(preds ...predicate.ProvisionedNetwork) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToProvisionedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisionedNetworkTable, StatusToProvisionedNetworkColumn), - ) + step := newStatusToProvisionedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -619,7 +367,6 @@ func HasStatusToProvisionedHost() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToProvisionedHostTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisionedHostTable, StatusToProvisionedHostColumn), ) sqlgraph.HasNeighbors(s, step) @@ -629,11 +376,7 @@ func HasStatusToProvisionedHost() predicate.Status { // HasStatusToProvisionedHostWith applies the HasEdge predicate on the "StatusToProvisionedHost" edge with a given conditions (other predicates). 
func HasStatusToProvisionedHostWith(preds ...predicate.ProvisionedHost) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToProvisionedHostInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisionedHostTable, StatusToProvisionedHostColumn), - ) + step := newStatusToProvisionedHostStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -647,7 +390,6 @@ func HasStatusToProvisioningStep() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToProvisioningStepTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisioningStepTable, StatusToProvisioningStepColumn), ) sqlgraph.HasNeighbors(s, step) @@ -657,11 +399,7 @@ func HasStatusToProvisioningStep() predicate.Status { // HasStatusToProvisioningStepWith applies the HasEdge predicate on the "StatusToProvisioningStep" edge with a given conditions (other predicates). func HasStatusToProvisioningStepWith(preds ...predicate.ProvisioningStep) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToProvisioningStepInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToProvisioningStepTable, StatusToProvisioningStepColumn), - ) + step := newStatusToProvisioningStepStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -675,7 +413,6 @@ func HasStatusToTeam() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToTeamTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToTeamTable, StatusToTeamColumn), ) sqlgraph.HasNeighbors(s, step) @@ -685,11 +422,7 @@ func HasStatusToTeam() predicate.Status { // HasStatusToTeamWith applies the HasEdge predicate on the "StatusToTeam" edge with a given conditions (other predicates). func HasStatusToTeamWith(preds ...predicate.Team) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToTeamInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToTeamTable, StatusToTeamColumn), - ) + step := newStatusToTeamStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -703,7 +436,6 @@ func HasStatusToPlan() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToPlanTable, StatusToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -713,11 +445,7 @@ func HasStatusToPlan() predicate.Status { // HasStatusToPlanWith applies the HasEdge predicate on the "StatusToPlan" edge with a given conditions (other predicates). 
func HasStatusToPlanWith(preds ...predicate.Plan) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToPlanTable, StatusToPlanColumn), - ) + step := newStatusToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -731,7 +459,6 @@ func HasStatusToServerTask() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToServerTaskTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToServerTaskTable, StatusToServerTaskColumn), ) sqlgraph.HasNeighbors(s, step) @@ -741,11 +468,7 @@ func HasStatusToServerTask() predicate.Status { // HasStatusToServerTaskWith applies the HasEdge predicate on the "StatusToServerTask" edge with a given conditions (other predicates). func HasStatusToServerTaskWith(preds ...predicate.ServerTask) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToServerTaskInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToServerTaskTable, StatusToServerTaskColumn), - ) + step := newStatusToServerTaskStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -759,7 +482,6 @@ func HasStatusToAdhocPlan() predicate.Status { return predicate.Status(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToAdhocPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, StatusToAdhocPlanTable, StatusToAdhocPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -769,11 +491,7 @@ func HasStatusToAdhocPlan() predicate.Status { // HasStatusToAdhocPlanWith applies the HasEdge predicate on the "StatusToAdhocPlan" edge with a given conditions (other predicates). func HasStatusToAdhocPlanWith(preds ...predicate.AdhocPlan) predicate.Status { return predicate.Status(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(StatusToAdhocPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, StatusToAdhocPlanTable, StatusToAdhocPlanColumn), - ) + step := newStatusToAdhocPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -784,32 +502,15 @@ func HasStatusToAdhocPlanWith(preds ...predicate.AdhocPlan) predicate.Status { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Status) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Status(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Status) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Status(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Status) predicate.Status { - return predicate.Status(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Status(sql.NotPredicates(p)) } diff --git a/ent/status_create.go b/ent/status_create.go index e36b5054..1fe8c4b2 100755 --- a/ent/status_create.go +++ b/ent/status_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -284,44 +284,8 @@ func (sc *StatusCreate) Mutation() *StatusMutation { // Save creates the Status in the database. func (sc *StatusCreate) Save(ctx context.Context) (*Status, error) { - var ( - err error - node *Status - ) sc.defaults() - if len(sc.hooks) == 0 { - if err = sc.check(); err != nil { - return nil, err - } - node, err = sc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*StatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = sc.check(); err != nil { - return nil, err - } - sc.mutation = mutation - if node, err = sc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(sc.hooks) - 1; i >= 0; i-- { - if sc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = sc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, sc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, sc.sqlSave, sc.mutation, sc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -390,10 +354,13 @@ func (sc *StatusCreate) check() error { } func (sc *StatusCreate) sqlSave(ctx context.Context) (*Status, error) { + if err := sc.check(); err != nil { + return nil, err + } _node, _spec := sc.createSpec() if err := sqlgraph.CreateNode(ctx, sc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -404,78 +371,46 @@ func (sc *StatusCreate) sqlSave(ctx context.Context) (*Status, error) { return nil, err } } + sc.mutation.id = &_node.ID + sc.mutation.done = true return _node, nil } func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { var ( _node = &Status{config: sc.config} - _spec = &sqlgraph.CreateSpec{ - Table: status.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(status.Table, sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID)) ) if id, ok := sc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := sc.mutation.State(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: status.FieldState, - }) + _spec.SetField(status.FieldState, field.TypeEnum, value) _node.State = value } if value, ok := sc.mutation.StatusFor(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: status.FieldStatusFor, - }) + _spec.SetField(status.FieldStatusFor, field.TypeEnum, value) _node.StatusFor = value } if value, ok := sc.mutation.StartedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: status.FieldStartedAt, - }) + _spec.SetField(status.FieldStartedAt, field.TypeTime, value) _node.StartedAt = value } if value, ok := sc.mutation.EndedAt(); ok { - _spec.Fields = 
append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: status.FieldEndedAt, - }) + _spec.SetField(status.FieldEndedAt, field.TypeTime, value) _node.EndedAt = value } if value, ok := sc.mutation.Failed(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: status.FieldFailed, - }) + _spec.SetField(status.FieldFailed, field.TypeBool, value) _node.Failed = value } if value, ok := sc.mutation.Completed(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: status.FieldCompleted, - }) + _spec.SetField(status.FieldCompleted, field.TypeBool, value) _node.Completed = value } if value, ok := sc.mutation.Error(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: status.FieldError, - }) + _spec.SetField(status.FieldError, field.TypeString, value) _node.Error = value } if nodes := sc.mutation.StatusToBuildIDs(); len(nodes) > 0 { @@ -486,10 +421,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -506,10 +438,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -526,10 +455,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -546,10 +472,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -566,10 +489,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -586,10 +506,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -606,10 +523,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: 
field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -626,10 +540,7 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { Columns: []string{status.StatusToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -644,11 +555,15 @@ func (sc *StatusCreate) createSpec() (*Status, *sqlgraph.CreateSpec) { // StatusCreateBulk is the builder for creating many Status entities in bulk. type StatusCreateBulk struct { config + err error builders []*StatusCreate } // Save creates the Status entities in the database. func (scb *StatusCreateBulk) Save(ctx context.Context) ([]*Status, error) { + if scb.err != nil { + return nil, scb.err + } specs := make([]*sqlgraph.CreateSpec, len(scb.builders)) nodes := make([]*Status, len(scb.builders)) mutators := make([]Mutator, len(scb.builders)) @@ -665,8 +580,8 @@ func (scb *StatusCreateBulk) Save(ctx context.Context) ([]*Status, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, scb.builders[i+1].mutation) } else { @@ -674,7 +589,7 @@ func (scb *StatusCreateBulk) Save(ctx context.Context) ([]*Status, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, scb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/status_delete.go b/ent/status_delete.go index bda07cf9..383a8214 100755 --- a/ent/status_delete.go +++ b/ent/status_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (sd *StatusDelete) Where(ps ...predicate.Status) *StatusDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (sd *StatusDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(sd.hooks) == 0 { - affected, err = sd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*StatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - sd.mutation = mutation - affected, err = sd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(sd.hooks) - 1; i >= 0; i-- { - if sd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = sd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, sd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, sd.sqlExec, sd.mutation, sd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
@@ -68,15 +40,7 @@ func (sd *StatusDelete) ExecX(ctx context.Context) int { } func (sd *StatusDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: status.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(status.Table, sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID)) if ps := sd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (sd *StatusDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, sd.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, sd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + sd.mutation.done = true + return affected, err } // StatusDeleteOne is the builder for deleting a single Status entity. @@ -92,6 +61,12 @@ type StatusDeleteOne struct { sd *StatusDelete } +// Where appends a list predicates to the StatusDelete builder. +func (sdo *StatusDeleteOne) Where(ps ...predicate.Status) *StatusDeleteOne { + sdo.sd.mutation.Where(ps...) + return sdo +} + // Exec executes the deletion query. func (sdo *StatusDeleteOne) Exec(ctx context.Context) error { n, err := sdo.sd.Exec(ctx) @@ -107,5 +82,7 @@ func (sdo *StatusDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (sdo *StatusDeleteOne) ExecX(ctx context.Context) { - sdo.sd.ExecX(ctx) + if err := sdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/status_query.go b/ent/status_query.go index 0691a30d..2aaef653 100755 --- a/ent/status_query.go +++ b/ent/status_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -27,13 +26,10 @@ import ( // StatusQuery is the builder for querying Status entities. type StatusQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Status - // eager-loading edges. + ctx *QueryContext + order []status.OrderOption + inters []Interceptor + predicates []predicate.Status withStatusToBuild *BuildQuery withStatusToProvisionedNetwork *ProvisionedNetworkQuery withStatusToProvisionedHost *ProvisionedHostQuery @@ -43,6 +39,8 @@ type StatusQuery struct { withStatusToServerTask *ServerTaskQuery withStatusToAdhocPlan *AdhocPlanQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Status) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -54,34 +52,34 @@ func (sq *StatusQuery) Where(ps ...predicate.Status) *StatusQuery { return sq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (sq *StatusQuery) Limit(limit int) *StatusQuery { - sq.limit = &limit + sq.ctx.Limit = &limit return sq } -// Offset adds an offset step to the query. +// Offset to start from. func (sq *StatusQuery) Offset(offset int) *StatusQuery { - sq.offset = &offset + sq.ctx.Offset = &offset return sq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. 
func (sq *StatusQuery) Unique(unique bool) *StatusQuery { - sq.unique = &unique + sq.ctx.Unique = &unique return sq } -// Order adds an order step to the query. -func (sq *StatusQuery) Order(o ...OrderFunc) *StatusQuery { +// Order specifies how the records should be ordered. +func (sq *StatusQuery) Order(o ...status.OrderOption) *StatusQuery { sq.order = append(sq.order, o...) return sq } // QueryStatusToBuild chains the current query on the "StatusToBuild" edge. func (sq *StatusQuery) QueryStatusToBuild() *BuildQuery { - query := &BuildQuery{config: sq.config} + query := (&BuildClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -103,7 +101,7 @@ func (sq *StatusQuery) QueryStatusToBuild() *BuildQuery { // QueryStatusToProvisionedNetwork chains the current query on the "StatusToProvisionedNetwork" edge. func (sq *StatusQuery) QueryStatusToProvisionedNetwork() *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: sq.config} + query := (&ProvisionedNetworkClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -125,7 +123,7 @@ func (sq *StatusQuery) QueryStatusToProvisionedNetwork() *ProvisionedNetworkQuer // QueryStatusToProvisionedHost chains the current query on the "StatusToProvisionedHost" edge. func (sq *StatusQuery) QueryStatusToProvisionedHost() *ProvisionedHostQuery { - query := &ProvisionedHostQuery{config: sq.config} + query := (&ProvisionedHostClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -147,7 +145,7 @@ func (sq *StatusQuery) QueryStatusToProvisionedHost() *ProvisionedHostQuery { // QueryStatusToProvisioningStep chains the current query on the "StatusToProvisioningStep" edge. func (sq *StatusQuery) QueryStatusToProvisioningStep() *ProvisioningStepQuery { - query := &ProvisioningStepQuery{config: sq.config} + query := (&ProvisioningStepClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -169,7 +167,7 @@ func (sq *StatusQuery) QueryStatusToProvisioningStep() *ProvisioningStepQuery { // QueryStatusToTeam chains the current query on the "StatusToTeam" edge. func (sq *StatusQuery) QueryStatusToTeam() *TeamQuery { - query := &TeamQuery{config: sq.config} + query := (&TeamClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -191,7 +189,7 @@ func (sq *StatusQuery) QueryStatusToTeam() *TeamQuery { // QueryStatusToPlan chains the current query on the "StatusToPlan" edge. func (sq *StatusQuery) QueryStatusToPlan() *PlanQuery { - query := &PlanQuery{config: sq.config} + query := (&PlanClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -213,7 +211,7 @@ func (sq *StatusQuery) QueryStatusToPlan() *PlanQuery { // QueryStatusToServerTask chains the current query on the "StatusToServerTask" edge. 
func (sq *StatusQuery) QueryStatusToServerTask() *ServerTaskQuery { - query := &ServerTaskQuery{config: sq.config} + query := (&ServerTaskClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -235,7 +233,7 @@ func (sq *StatusQuery) QueryStatusToServerTask() *ServerTaskQuery { // QueryStatusToAdhocPlan chains the current query on the "StatusToAdhocPlan" edge. func (sq *StatusQuery) QueryStatusToAdhocPlan() *AdhocPlanQuery { - query := &AdhocPlanQuery{config: sq.config} + query := (&AdhocPlanClient{config: sq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := sq.prepareQuery(ctx); err != nil { return nil, err @@ -258,7 +256,7 @@ func (sq *StatusQuery) QueryStatusToAdhocPlan() *AdhocPlanQuery { // First returns the first Status entity from the query. // Returns a *NotFoundError when no Status was found. func (sq *StatusQuery) First(ctx context.Context) (*Status, error) { - nodes, err := sq.Limit(1).All(ctx) + nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, "First")) if err != nil { return nil, err } @@ -281,7 +279,7 @@ func (sq *StatusQuery) FirstX(ctx context.Context) *Status { // Returns a *NotFoundError when no Status ID was found. func (sq *StatusQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = sq.Limit(1).IDs(ctx); err != nil { + if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -304,7 +302,7 @@ func (sq *StatusQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Status entity is found. // Returns a *NotFoundError when no Status entities are found. func (sq *StatusQuery) Only(ctx context.Context) (*Status, error) { - nodes, err := sq.Limit(2).All(ctx) + nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, "Only")) if err != nil { return nil, err } @@ -332,7 +330,7 @@ func (sq *StatusQuery) OnlyX(ctx context.Context) *Status { // Returns a *NotFoundError when no entities are found. func (sq *StatusQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = sq.Limit(2).IDs(ctx); err != nil { + if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -357,10 +355,12 @@ func (sq *StatusQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of StatusSlice. func (sq *StatusQuery) All(ctx context.Context) ([]*Status, error) { + ctx = setContextOp(ctx, sq.ctx, "All") if err := sq.prepareQuery(ctx); err != nil { return nil, err } - return sq.sqlAll(ctx) + qr := querierAll[[]*Status, *StatusQuery]() + return withInterceptors[[]*Status](ctx, sq, qr, sq.inters) } // AllX is like All, but panics if an error occurs. @@ -373,9 +373,12 @@ func (sq *StatusQuery) AllX(ctx context.Context) []*Status { } // IDs executes the query and returns a list of Status IDs. 
-func (sq *StatusQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := sq.Select(status.FieldID).Scan(ctx, &ids); err != nil { +func (sq *StatusQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if sq.ctx.Unique == nil && sq.path != nil { + sq.Unique(true) + } + ctx = setContextOp(ctx, sq.ctx, "IDs") + if err = sq.Select(status.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -392,10 +395,11 @@ func (sq *StatusQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (sq *StatusQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, sq.ctx, "Count") if err := sq.prepareQuery(ctx); err != nil { return 0, err } - return sq.sqlCount(ctx) + return withInterceptors[int](ctx, sq, querierCount[*StatusQuery](), sq.inters) } // CountX is like Count, but panics if an error occurs. @@ -409,10 +413,15 @@ func (sq *StatusQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (sq *StatusQuery) Exist(ctx context.Context) (bool, error) { - if err := sq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, sq.ctx, "Exist") + switch _, err := sq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return sq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -432,9 +441,9 @@ func (sq *StatusQuery) Clone() *StatusQuery { } return &StatusQuery{ config: sq.config, - limit: sq.limit, - offset: sq.offset, - order: append([]OrderFunc{}, sq.order...), + ctx: sq.ctx.Clone(), + order: append([]status.OrderOption{}, sq.order...), + inters: append([]Interceptor{}, sq.inters...), predicates: append([]predicate.Status{}, sq.predicates...), withStatusToBuild: sq.withStatusToBuild.Clone(), withStatusToProvisionedNetwork: sq.withStatusToProvisionedNetwork.Clone(), @@ -445,16 +454,15 @@ func (sq *StatusQuery) Clone() *StatusQuery { withStatusToServerTask: sq.withStatusToServerTask.Clone(), withStatusToAdhocPlan: sq.withStatusToAdhocPlan.Clone(), // clone intermediate query. - sql: sq.sql.Clone(), - path: sq.path, - unique: sq.unique, + sql: sq.sql.Clone(), + path: sq.path, } } // WithStatusToBuild tells the query-builder to eager-load the nodes that are connected to // the "StatusToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (sq *StatusQuery) WithStatusToBuild(opts ...func(*BuildQuery)) *StatusQuery { - query := &BuildQuery{config: sq.config} + query := (&BuildClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -465,7 +473,7 @@ func (sq *StatusQuery) WithStatusToBuild(opts ...func(*BuildQuery)) *StatusQuery // WithStatusToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to // the "StatusToProvisionedNetwork" edge. The optional arguments are used to configure the query builder of the edge. 
func (sq *StatusQuery) WithStatusToProvisionedNetwork(opts ...func(*ProvisionedNetworkQuery)) *StatusQuery { - query := &ProvisionedNetworkQuery{config: sq.config} + query := (&ProvisionedNetworkClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -476,7 +484,7 @@ func (sq *StatusQuery) WithStatusToProvisionedNetwork(opts ...func(*ProvisionedN // WithStatusToProvisionedHost tells the query-builder to eager-load the nodes that are connected to // the "StatusToProvisionedHost" edge. The optional arguments are used to configure the query builder of the edge. func (sq *StatusQuery) WithStatusToProvisionedHost(opts ...func(*ProvisionedHostQuery)) *StatusQuery { - query := &ProvisionedHostQuery{config: sq.config} + query := (&ProvisionedHostClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -487,7 +495,7 @@ func (sq *StatusQuery) WithStatusToProvisionedHost(opts ...func(*ProvisionedHost // WithStatusToProvisioningStep tells the query-builder to eager-load the nodes that are connected to // the "StatusToProvisioningStep" edge. The optional arguments are used to configure the query builder of the edge. func (sq *StatusQuery) WithStatusToProvisioningStep(opts ...func(*ProvisioningStepQuery)) *StatusQuery { - query := &ProvisioningStepQuery{config: sq.config} + query := (&ProvisioningStepClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -498,7 +506,7 @@ func (sq *StatusQuery) WithStatusToProvisioningStep(opts ...func(*ProvisioningSt // WithStatusToTeam tells the query-builder to eager-load the nodes that are connected to // the "StatusToTeam" edge. The optional arguments are used to configure the query builder of the edge. func (sq *StatusQuery) WithStatusToTeam(opts ...func(*TeamQuery)) *StatusQuery { - query := &TeamQuery{config: sq.config} + query := (&TeamClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -509,7 +517,7 @@ func (sq *StatusQuery) WithStatusToTeam(opts ...func(*TeamQuery)) *StatusQuery { // WithStatusToPlan tells the query-builder to eager-load the nodes that are connected to // the "StatusToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (sq *StatusQuery) WithStatusToPlan(opts ...func(*PlanQuery)) *StatusQuery { - query := &PlanQuery{config: sq.config} + query := (&PlanClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -520,7 +528,7 @@ func (sq *StatusQuery) WithStatusToPlan(opts ...func(*PlanQuery)) *StatusQuery { // WithStatusToServerTask tells the query-builder to eager-load the nodes that are connected to // the "StatusToServerTask" edge. The optional arguments are used to configure the query builder of the edge. func (sq *StatusQuery) WithStatusToServerTask(opts ...func(*ServerTaskQuery)) *StatusQuery { - query := &ServerTaskQuery{config: sq.config} + query := (&ServerTaskClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -531,7 +539,7 @@ func (sq *StatusQuery) WithStatusToServerTask(opts ...func(*ServerTaskQuery)) *S // WithStatusToAdhocPlan tells the query-builder to eager-load the nodes that are connected to // the "StatusToAdhocPlan" edge. The optional arguments are used to configure the query builder of the edge. 
func (sq *StatusQuery) WithStatusToAdhocPlan(opts ...func(*AdhocPlanQuery)) *StatusQuery { - query := &AdhocPlanQuery{config: sq.config} + query := (&AdhocPlanClient{config: sq.config}).Query() for _, opt := range opts { opt(query) } @@ -553,17 +561,13 @@ func (sq *StatusQuery) WithStatusToAdhocPlan(opts ...func(*AdhocPlanQuery)) *Sta // GroupBy(status.FieldState). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (sq *StatusQuery) GroupBy(field string, fields ...string) *StatusGroupBy { - group := &StatusGroupBy{config: sq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := sq.prepareQuery(ctx); err != nil { - return nil, err - } - return sq.sqlQuery(ctx), nil - } - return group + sq.ctx.Fields = append([]string{field}, fields...) + grbuild := &StatusGroupBy{build: sq} + grbuild.flds = &sq.ctx.Fields + grbuild.label = status.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -578,14 +582,31 @@ func (sq *StatusQuery) GroupBy(field string, fields ...string) *StatusGroupBy { // client.Status.Query(). // Select(status.FieldState). // Scan(ctx, &v) -// func (sq *StatusQuery) Select(fields ...string) *StatusSelect { - sq.fields = append(sq.fields, fields...) - return &StatusSelect{StatusQuery: sq} + sq.ctx.Fields = append(sq.ctx.Fields, fields...) + sbuild := &StatusSelect{StatusQuery: sq} + sbuild.label = status.Label + sbuild.flds, sbuild.scan = &sq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a StatusSelect configured with the given aggregations. +func (sq *StatusQuery) Aggregate(fns ...AggregateFunc) *StatusSelect { + return sq.Select().Aggregate(fns...) } func (sq *StatusQuery) prepareQuery(ctx context.Context) error { - for _, f := range sq.fields { + for _, inter := range sq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, sq); err != nil { + return err + } + } + } + for _, f := range sq.ctx.Fields { if !status.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -600,7 +621,7 @@ func (sq *StatusQuery) prepareQuery(ctx context.Context) error { return nil } -func (sq *StatusQuery) sqlAll(ctx context.Context) ([]*Status, error) { +func (sq *StatusQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Status, error) { var ( nodes = []*Status{} withFKs = sq.withFKs @@ -622,295 +643,361 @@ func (sq *StatusQuery) sqlAll(ctx context.Context) ([]*Status, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, status.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Status).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Status{config: sq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(sq.modifiers) > 0 { + _spec.Modifiers = sq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, sq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := sq.withStatusToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) - for i := range nodes { - if nodes[i].build_build_to_status == nil { - continue - } - fk := *nodes[i].build_build_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := sq.loadStatusToBuild(ctx, query, nodes, nil, + func(n *Status, e *Build) { n.Edges.StatusToBuild = e }); err != nil { + return nil, err } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := sq.withStatusToProvisionedNetwork; query != nil { + if err := sq.loadStatusToProvisionedNetwork(ctx, query, nodes, nil, + func(n *Status, e *ProvisionedNetwork) { n.Edges.StatusToProvisionedNetwork = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "build_build_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToBuild = n - } + } + if query := sq.withStatusToProvisionedHost; query != nil { + if err := sq.loadStatusToProvisionedHost(ctx, query, nodes, nil, + func(n *Status, e *ProvisionedHost) { n.Edges.StatusToProvisionedHost = e }); err != nil { + return nil, err } } - - if query := sq.withStatusToProvisionedNetwork; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) - for i := range nodes { - if nodes[i].provisioned_network_provisioned_network_to_status == nil { - continue - } - fk := *nodes[i].provisioned_network_provisioned_network_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := sq.withStatusToProvisioningStep; query != nil { + if err := sq.loadStatusToProvisioningStep(ctx, query, nodes, nil, + func(n *Status, e *ProvisioningStep) { n.Edges.StatusToProvisioningStep = e }); err != nil { + return nil, err } - query.Where(provisionednetwork.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := sq.withStatusToTeam; query != nil { + if err := sq.loadStatusToTeam(ctx, query, nodes, nil, + func(n *Status, e *Team) { n.Edges.StatusToTeam = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToProvisionedNetwork = n - } + } + if query := sq.withStatusToPlan; query != nil { + if err := sq.loadStatusToPlan(ctx, 
query, nodes, nil, + func(n *Status, e *Plan) { n.Edges.StatusToPlan = e }); err != nil { + return nil, err } } - - if query := sq.withStatusToProvisionedHost; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) - for i := range nodes { - if nodes[i].provisioned_host_provisioned_host_to_status == nil { - continue - } - fk := *nodes[i].provisioned_host_provisioned_host_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if query := sq.withStatusToServerTask; query != nil { + if err := sq.loadStatusToServerTask(ctx, query, nodes, nil, + func(n *Status, e *ServerTask) { n.Edges.StatusToServerTask = e }); err != nil { + return nil, err } - query.Where(provisionedhost.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := sq.withStatusToAdhocPlan; query != nil { + if err := sq.loadStatusToAdhocPlan(ctx, query, nodes, nil, + func(n *Status, e *AdhocPlan) { n.Edges.StatusToAdhocPlan = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToProvisionedHost = n - } + } + for i := range sq.loadTotal { + if err := sq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := sq.withStatusToProvisioningStep; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) +func (sq *StatusQuery) loadStatusToBuild(ctx context.Context, query *BuildQuery, nodes []*Status, init func(*Status), assign func(*Status, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].build_build_to_status == nil { + continue + } + fk := *nodes[i].build_build_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "build_build_to_status" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].provisioning_step_provisioning_step_to_status == nil { - continue - } - fk := *nodes[i].provisioning_step_provisioning_step_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(provisioningstep.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (sq *StatusQuery) loadStatusToProvisionedNetwork(ctx context.Context, query *ProvisionedNetworkQuery, nodes []*Status, init func(*Status), assign func(*Status, *ProvisionedNetwork)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].provisioned_network_provisioned_network_to_status == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToProvisioningStep = n - } + fk := 
*nodes[i].provisioned_network_provisioned_network_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := sq.withStatusToTeam; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) + if len(ids) == 0 { + return nil + } + query.Where(provisionednetwork.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_status" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].team_team_to_status == nil { - continue - } - fk := *nodes[i].team_team_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(team.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (sq *StatusQuery) loadStatusToProvisionedHost(ctx context.Context, query *ProvisionedHostQuery, nodes []*Status, init func(*Status), assign func(*Status, *ProvisionedHost)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].provisioned_host_provisioned_host_to_status == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "team_team_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToTeam = n - } + fk := *nodes[i].provisioned_host_provisioned_host_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := sq.withStatusToPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) + if len(ids) == 0 { + return nil + } + query.Where(provisionedhost.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioned_host_provisioned_host_to_status" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].plan_plan_to_status == nil { - continue - } - fk := *nodes[i].plan_plan_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (sq *StatusQuery) loadStatusToProvisioningStep(ctx context.Context, query *ProvisioningStepQuery, nodes []*Status, init func(*Status), assign func(*Status, *ProvisioningStep)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].provisioning_step_provisioning_step_to_status == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToPlan = n - } + fk := *nodes[i].provisioning_step_provisioning_step_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := sq.withStatusToServerTask; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := 
make(map[uuid.UUID][]*Status) + if len(ids) == 0 { + return nil + } + query.Where(provisioningstep.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "provisioning_step_provisioning_step_to_status" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].server_task_server_task_to_status == nil { - continue - } - fk := *nodes[i].server_task_server_task_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(servertask.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (sq *StatusQuery) loadStatusToTeam(ctx context.Context, query *TeamQuery, nodes []*Status, init func(*Status), assign func(*Status, *Team)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].team_team_to_status == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToServerTask = n - } + fk := *nodes[i].team_team_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - if query := sq.withStatusToAdhocPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Status) + if len(ids) == 0 { + return nil + } + query.Where(team.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "team_team_to_status" returned %v`, n.ID) + } for i := range nodes { - if nodes[i].adhoc_plan_adhoc_plan_to_status == nil { - continue - } - fk := *nodes[i].adhoc_plan_adhoc_plan_to_status - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + assign(nodes[i], n) } - query.Where(adhocplan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (sq *StatusQuery) loadStatusToPlan(ctx context.Context, query *PlanQuery, nodes []*Status, init func(*Status), assign func(*Status, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].plan_plan_to_status == nil { + continue } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_status" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.StatusToAdhocPlan = n - } + fk := *nodes[i].plan_plan_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - - return nodes, nil + if len(ids) == 0 { + return nil + } + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_plan_to_status" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (sq *StatusQuery) loadStatusToServerTask(ctx context.Context, query *ServerTaskQuery, nodes 
[]*Status, init func(*Status), assign func(*Status, *ServerTask)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].server_task_server_task_to_status == nil { + continue + } + fk := *nodes[i].server_task_server_task_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(servertask.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "server_task_server_task_to_status" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (sq *StatusQuery) loadStatusToAdhocPlan(ctx context.Context, query *AdhocPlanQuery, nodes []*Status, init func(*Status), assign func(*Status, *AdhocPlan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Status) + for i := range nodes { + if nodes[i].adhoc_plan_adhoc_plan_to_status == nil { + continue + } + fk := *nodes[i].adhoc_plan_adhoc_plan_to_status + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(adhocplan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "adhoc_plan_adhoc_plan_to_status" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (sq *StatusQuery) sqlCount(ctx context.Context) (int, error) { _spec := sq.querySpec() - _spec.Node.Columns = sq.fields - if len(sq.fields) > 0 { - _spec.Unique = sq.unique != nil && *sq.unique + if len(sq.modifiers) > 0 { + _spec.Modifiers = sq.modifiers } - return sqlgraph.CountNodes(ctx, sq.driver, _spec) -} - -func (sq *StatusQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := sq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = sq.ctx.Fields + if len(sq.ctx.Fields) > 0 { + _spec.Unique = sq.ctx.Unique != nil && *sq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, sq.driver, _spec) } func (sq *StatusQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: status.Table, - Columns: status.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, - }, - From: sq.sql, - Unique: true, - } - if unique := sq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(status.Table, status.Columns, sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID)) + _spec.From = sq.sql + if unique := sq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if sq.path != nil { + _spec.Unique = true } - if fields := sq.fields; len(fields) > 0 { + if fields := sq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, status.FieldID) for i := range fields { @@ -926,10 +1013,10 @@ func (sq *StatusQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := sq.limit; limit != nil { + if limit := sq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := sq.offset; offset != nil { + if offset := sq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := 
sq.order; len(ps) > 0 { @@ -945,7 +1032,7 @@ func (sq *StatusQuery) querySpec() *sqlgraph.QuerySpec { func (sq *StatusQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(sq.driver.Dialect()) t1 := builder.Table(status.Table) - columns := sq.fields + columns := sq.ctx.Fields if len(columns) == 0 { columns = status.Columns } @@ -954,7 +1041,7 @@ func (sq *StatusQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = sq.sql selector.Select(selector.Columns(columns...)...) } - if sq.unique != nil && *sq.unique { + if sq.ctx.Unique != nil && *sq.ctx.Unique { selector.Distinct() } for _, p := range sq.predicates { @@ -963,12 +1050,12 @@ func (sq *StatusQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range sq.order { p(selector) } - if offset := sq.offset; offset != nil { + if offset := sq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := sq.limit; limit != nil { + if limit := sq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -976,12 +1063,8 @@ func (sq *StatusQuery) sqlQuery(ctx context.Context) *sql.Selector { // StatusGroupBy is the group-by builder for Status entities. type StatusGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *StatusQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -990,471 +1073,77 @@ func (sgb *StatusGroupBy) Aggregate(fns ...AggregateFunc) *StatusGroupBy { return sgb } -// Scan applies the group-by query and scans the result into the given value. -func (sgb *StatusGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := sgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (sgb *StatusGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sgb.build.ctx, "GroupBy") + if err := sgb.build.prepareQuery(ctx); err != nil { return err } - sgb.sql = query - return sgb.sqlScan(ctx, v) + return scanWithInterceptors[*StatusQuery, *StatusGroupBy](ctx, sgb.build, sgb, sgb.build.inters, v) } -// ScanX is like Scan, but panics if an error occurs. -func (sgb *StatusGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := sgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (sgb *StatusGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: StatusGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (sgb *StatusGroupBy) StringsX(ctx context.Context) []string { - v, err := sgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (sgb *StatusGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = sgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (sgb *StatusGroupBy) StringX(ctx context.Context) string { - v, err := sgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (sgb *StatusGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: StatusGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (sgb *StatusGroupBy) IntsX(ctx context.Context) []int { - v, err := sgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *StatusGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = sgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (sgb *StatusGroupBy) IntX(ctx context.Context) int { - v, err := sgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (sgb *StatusGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: StatusGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (sgb *StatusGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := sgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *StatusGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = sgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (sgb *StatusGroupBy) Float64X(ctx context.Context) float64 { - v, err := sgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (sgb *StatusGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(sgb.fields) > 1 { - return nil, errors.New("ent: StatusGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := sgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (sgb *StatusGroupBy) BoolsX(ctx context.Context) []bool { - v, err := sgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (sgb *StatusGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = sgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (sgb *StatusGroupBy) BoolX(ctx context.Context) bool { - v, err := sgb.Bool(ctx) - if err != nil { - panic(err) +func (sgb *StatusGroupBy) sqlScan(ctx context.Context, root *StatusQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(sgb.fns)) + for _, fn := range sgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (sgb *StatusGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range sgb.fields { - if !status.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*sgb.flds)+len(sgb.fns)) + for _, f := range *sgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := sgb.sqlQuery() + selector.GroupBy(selector.Columns(*sgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := sgb.driver.Query(ctx, query, args, rows); err != nil { + if err := sgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (sgb *StatusGroupBy) sqlQuery() *sql.Selector { - selector := sgb.sql.Select() - aggregation := make([]string, 0, len(sgb.fns)) - for _, fn := range sgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(sgb.fields)+len(sgb.fns)) - for _, f := range sgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(sgb.fields...)...) -} - // StatusSelect is the builder for selecting fields of Status entities. type StatusSelect struct { *StatusQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ss *StatusSelect) Aggregate(fns ...AggregateFunc) *StatusSelect { + ss.fns = append(ss.fns, fns...) + return ss } // Scan applies the selector query and scans the result into the given value. 
-func (ss *StatusSelect) Scan(ctx context.Context, v interface{}) error { +func (ss *StatusSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ss.ctx, "Select") if err := ss.prepareQuery(ctx); err != nil { return err } - ss.sql = ss.StatusQuery.sqlQuery(ctx) - return ss.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ss *StatusSelect) ScanX(ctx context.Context, v interface{}) { - if err := ss.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) Strings(ctx context.Context) ([]string, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: StatusSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil + return scanWithInterceptors[*StatusQuery, *StatusSelect](ctx, ss.StatusQuery, ss, ss.inters, v) } -// StringsX is like Strings, but panics if an error occurs. -func (ss *StatusSelect) StringsX(ctx context.Context) []string { - v, err := ss.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ss.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ss *StatusSelect) StringX(ctx context.Context) string { - v, err := ss.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) Ints(ctx context.Context) ([]int, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: StatusSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ss *StatusSelect) IntsX(ctx context.Context) []int { - v, err := ss.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ss.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ss *StatusSelect) IntX(ctx context.Context) int { - v, err := ss.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. 
-func (ss *StatusSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: StatusSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ss *StatusSelect) Float64sX(ctx context.Context) []float64 { - v, err := ss.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ss.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ss *StatusSelect) Float64X(ctx context.Context) float64 { - v, err := ss.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ss.fields) > 1 { - return nil, errors.New("ent: StatusSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ss.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ss *StatusSelect) BoolsX(ctx context.Context) []bool { - v, err := ss.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ss *StatusSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ss.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{status.Label} - default: - err = fmt.Errorf("ent: StatusSelect.Bools returned %d results when one was expected", len(v)) +func (ss *StatusSelect) sqlScan(ctx context.Context, root *StatusQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ss.fns)) + for _, fn := range ss.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ss *StatusSelect) BoolX(ctx context.Context) bool { - v, err := ss.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ss.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ss *StatusSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ss.sql.Query() + query, args := selector.Query() if err := ss.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/status_update.go b/ent/status_update.go index 2f9bbd58..3238f519 100755 --- a/ent/status_update.go +++ b/ent/status_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -43,12 +43,28 @@ func (su *StatusUpdate) SetState(s status.State) *StatusUpdate { return su } +// SetNillableState sets the "state" field if the given value is not nil. 
+func (su *StatusUpdate) SetNillableState(s *status.State) *StatusUpdate { + if s != nil { + su.SetState(*s) + } + return su +} + // SetStatusFor sets the "status_for" field. func (su *StatusUpdate) SetStatusFor(sf status.StatusFor) *StatusUpdate { su.mutation.SetStatusFor(sf) return su } +// SetNillableStatusFor sets the "status_for" field if the given value is not nil. +func (su *StatusUpdate) SetNillableStatusFor(sf *status.StatusFor) *StatusUpdate { + if sf != nil { + su.SetStatusFor(*sf) + } + return su +} + // SetStartedAt sets the "started_at" field. func (su *StatusUpdate) SetStartedAt(t time.Time) *StatusUpdate { su.mutation.SetStartedAt(t) @@ -344,40 +360,7 @@ func (su *StatusUpdate) ClearStatusToAdhocPlan() *StatusUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (su *StatusUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(su.hooks) == 0 { - if err = su.check(); err != nil { - return 0, err - } - affected, err = su.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*StatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = su.check(); err != nil { - return 0, err - } - su.mutation = mutation - affected, err = su.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(su.hooks) - 1; i >= 0; i-- { - if su.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = su.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, su.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, su.sqlSave, su.mutation, su.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -418,16 +401,10 @@ func (su *StatusUpdate) check() error { } func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: status.Table, - Columns: status.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, - }, + if err := su.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(status.Table, status.Columns, sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID)) if ps := su.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -436,71 +413,34 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := su.mutation.State(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: status.FieldState, - }) + _spec.SetField(status.FieldState, field.TypeEnum, value) } if value, ok := su.mutation.StatusFor(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: status.FieldStatusFor, - }) + _spec.SetField(status.FieldStatusFor, field.TypeEnum, value) } if value, ok := su.mutation.StartedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: status.FieldStartedAt, - }) + _spec.SetField(status.FieldStartedAt, field.TypeTime, value) } if su.mutation.StartedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: status.FieldStartedAt, - }) + _spec.ClearField(status.FieldStartedAt, field.TypeTime) } if value, ok := su.mutation.EndedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: status.FieldEndedAt, - }) + _spec.SetField(status.FieldEndedAt, field.TypeTime, value) } if su.mutation.EndedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: status.FieldEndedAt, - }) + _spec.ClearField(status.FieldEndedAt, field.TypeTime) } if value, ok := su.mutation.Failed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: status.FieldFailed, - }) + _spec.SetField(status.FieldFailed, field.TypeBool, value) } if value, ok := su.mutation.Completed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: status.FieldCompleted, - }) + _spec.SetField(status.FieldCompleted, field.TypeBool, value) } if value, ok := su.mutation.Error(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: status.FieldError, - }) + _spec.SetField(status.FieldError, field.TypeString, value) } if su.mutation.ErrorCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: status.FieldError, - }) + _spec.ClearField(status.FieldError, field.TypeString) } if su.mutation.StatusToBuildCleared() { edge := &sqlgraph.EdgeSpec{ @@ -510,10 +450,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, 
field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -526,10 +463,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -545,10 +479,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -561,10 +492,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -580,10 +508,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -596,10 +521,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -615,10 +537,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -631,10 +550,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -650,10 +566,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -666,10 +579,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: 
team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -685,10 +595,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -701,10 +608,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -720,10 +624,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -736,10 +637,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -755,10 +653,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -771,10 +666,7 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{status.StatusToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -786,10 +678,11 @@ func (su *StatusUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{status.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + su.mutation.done = true return n, nil } @@ -807,12 +700,28 @@ func (suo *StatusUpdateOne) SetState(s status.State) *StatusUpdateOne { return suo } +// SetNillableState sets the "state" field if the given value is not nil. +func (suo *StatusUpdateOne) SetNillableState(s *status.State) *StatusUpdateOne { + if s != nil { + suo.SetState(*s) + } + return suo +} + // SetStatusFor sets the "status_for" field. func (suo *StatusUpdateOne) SetStatusFor(sf status.StatusFor) *StatusUpdateOne { suo.mutation.SetStatusFor(sf) return suo } +// SetNillableStatusFor sets the "status_for" field if the given value is not nil. 
+func (suo *StatusUpdateOne) SetNillableStatusFor(sf *status.StatusFor) *StatusUpdateOne { + if sf != nil { + suo.SetStatusFor(*sf) + } + return suo +} + // SetStartedAt sets the "started_at" field. func (suo *StatusUpdateOne) SetStartedAt(t time.Time) *StatusUpdateOne { suo.mutation.SetStartedAt(t) @@ -1106,6 +1015,12 @@ func (suo *StatusUpdateOne) ClearStatusToAdhocPlan() *StatusUpdateOne { return suo } +// Where appends a list predicates to the StatusUpdate builder. +func (suo *StatusUpdateOne) Where(ps ...predicate.Status) *StatusUpdateOne { + suo.mutation.Where(ps...) + return suo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (suo *StatusUpdateOne) Select(field string, fields ...string) *StatusUpdateOne { @@ -1115,40 +1030,7 @@ func (suo *StatusUpdateOne) Select(field string, fields ...string) *StatusUpdate // Save executes the query and returns the updated Status entity. func (suo *StatusUpdateOne) Save(ctx context.Context) (*Status, error) { - var ( - err error - node *Status - ) - if len(suo.hooks) == 0 { - if err = suo.check(); err != nil { - return nil, err - } - node, err = suo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*StatusMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = suo.check(); err != nil { - return nil, err - } - suo.mutation = mutation - node, err = suo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(suo.hooks) - 1; i >= 0; i-- { - if suo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = suo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, suo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, suo.sqlSave, suo.mutation, suo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -1189,16 +1071,10 @@ func (suo *StatusUpdateOne) check() error { } func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: status.Table, - Columns: status.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, - }, + if err := suo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(status.Table, status.Columns, sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID)) id, ok := suo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Status.id" for update`)} @@ -1224,71 +1100,34 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err } } if value, ok := suo.mutation.State(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: status.FieldState, - }) + _spec.SetField(status.FieldState, field.TypeEnum, value) } if value, ok := suo.mutation.StatusFor(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeEnum, - Value: value, - Column: status.FieldStatusFor, - }) + _spec.SetField(status.FieldStatusFor, field.TypeEnum, value) } if value, ok := suo.mutation.StartedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: status.FieldStartedAt, - }) + _spec.SetField(status.FieldStartedAt, field.TypeTime, value) } if suo.mutation.StartedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: status.FieldStartedAt, - }) + _spec.ClearField(status.FieldStartedAt, field.TypeTime) } if value, ok := suo.mutation.EndedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: status.FieldEndedAt, - }) + _spec.SetField(status.FieldEndedAt, field.TypeTime, value) } if suo.mutation.EndedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: status.FieldEndedAt, - }) + _spec.ClearField(status.FieldEndedAt, field.TypeTime) } if value, ok := suo.mutation.Failed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: status.FieldFailed, - }) + _spec.SetField(status.FieldFailed, field.TypeBool, value) } if value, ok := suo.mutation.Completed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: status.FieldCompleted, - }) + _spec.SetField(status.FieldCompleted, field.TypeBool, value) } if value, ok := suo.mutation.Error(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: status.FieldError, - }) + _spec.SetField(status.FieldError, field.TypeString, value) } if suo.mutation.ErrorCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: status.FieldError, - }) + _spec.ClearField(status.FieldError, field.TypeString) } if suo.mutation.StatusToBuildCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1298,10 +1137,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: 
build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1314,10 +1150,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1333,10 +1166,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1349,10 +1179,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1368,10 +1195,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1384,10 +1208,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToProvisionedHostColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionedhost.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionedhost.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1403,10 +1224,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1419,10 +1237,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToProvisioningStepColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisioningstep.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisioningstep.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1438,10 +1253,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1454,10 +1266,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: 
[]string{status.StatusToTeamColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1473,10 +1282,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1489,10 +1295,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1508,10 +1311,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1524,10 +1324,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToServerTaskColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: servertask.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(servertask.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1543,10 +1340,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1559,10 +1353,7 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err Columns: []string{status.StatusToAdhocPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: adhocplan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(adhocplan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1577,9 +1368,10 @@ func (suo *StatusUpdateOne) sqlSave(ctx context.Context) (_node *Status, err err if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{status.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + suo.mutation.done = true return _node, nil } diff --git a/ent/tag.go b/ent/tag.go index 62985b2c..5530eb92 100755 --- a/ent/tag.go +++ b/ent/tag.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/tag" "github.com/google/uuid" @@ -25,11 +26,12 @@ type Tag struct { Description map[string]string `json:"description,omitempty"` included_network_included_network_to_tag *uuid.UUID user_user_to_tag *uuid.UUID + selectValues sql.SelectValues } // scanValues returns the types for scanning values from sql.Rows. -func (*Tag) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Tag) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case tag.FieldDescription: @@ -43,7 +45,7 @@ func (*Tag) scanValues(columns []string) ([]interface{}, error) { case tag.ForeignKeys[1]: // user_user_to_tag values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Tag", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -51,7 +53,7 @@ func (*Tag) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Tag fields. -func (t *Tag) assignValues(columns []string, values []interface{}) error { +func (t *Tag) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -97,26 +99,34 @@ func (t *Tag) assignValues(columns []string, values []interface{}) error { t.user_user_to_tag = new(uuid.UUID) *t.user_user_to_tag = *value.S.(*uuid.UUID) } + default: + t.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Tag. +// This includes values selected through modifiers, order, etc. +func (t *Tag) Value(name string) (ent.Value, error) { + return t.selectValues.Get(name) +} + // Update returns a builder for updating this Tag. // Note that you need to call Tag.Unwrap() before calling this method if this Tag // was returned from a transaction, and the transaction was committed or rolled back. func (t *Tag) Update() *TagUpdateOne { - return (&TagClient{config: t.config}).UpdateOne(t) + return NewTagClient(t.config).UpdateOne(t) } // Unwrap unwraps the Tag entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (t *Tag) Unwrap() *Tag { - tx, ok := t.config.driver.(*txDriver) + _tx, ok := t.config.driver.(*txDriver) if !ok { panic("ent: Tag is not a transactional entity") } - t.config.driver = tx.drv + t.config.driver = _tx.drv return t } @@ -124,12 +134,14 @@ func (t *Tag) Unwrap() *Tag { func (t *Tag) String() string { var builder strings.Builder builder.WriteString("Tag(") - builder.WriteString(fmt.Sprintf("id=%v", t.ID)) - builder.WriteString(", uuid=") + builder.WriteString(fmt.Sprintf("id=%v, ", t.ID)) + builder.WriteString("uuid=") builder.WriteString(fmt.Sprintf("%v", t.UUID)) - builder.WriteString(", name=") + builder.WriteString(", ") + builder.WriteString("name=") builder.WriteString(t.Name) - builder.WriteString(", description=") + builder.WriteString(", ") + builder.WriteString("description=") builder.WriteString(fmt.Sprintf("%v", t.Description)) builder.WriteByte(')') return builder.String() @@ -137,9 +149,3 @@ func (t *Tag) String() string { // Tags is a parsable slice of Tag. 
 type Tags []*Tag
-
-func (t Tags) config(cfg config) {
-	for _i := range t {
-		t[_i].config = cfg
-	}
-}
diff --git a/ent/tag/tag.go b/ent/tag/tag.go
index a4cacd6d..e536e1fe 100755
--- a/ent/tag/tag.go
+++ b/ent/tag/tag.go
@@ -1,8 +1,9 @@
-// Code generated by entc, DO NOT EDIT.
+// Code generated by ent, DO NOT EDIT.

 package tag

 import (
+	"entgo.io/ent/dialect/sql"
 	"github.com/google/uuid"
 )

@@ -55,3 +56,21 @@ var (
 	// DefaultID holds the default value on creation for the "id" field.
 	DefaultID func() uuid.UUID
 )
+
+// OrderOption defines the ordering options for the Tag queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByUUID orders the results by the uuid field.
+func ByUUID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUUID, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldName, opts...).ToFunc()
+}
diff --git a/ent/tag/where.go b/ent/tag/where.go
index d9b92d2a..193f199b 100755
--- a/ent/tag/where.go
+++ b/ent/tag/where.go
@@ -1,4 +1,4 @@
-// Code generated by entc, DO NOT EDIT.
+// Code generated by ent, DO NOT EDIT.

 package tag

@@ -10,316 +10,175 @@ import (
 // ID filters vertices based on their ID field.
 func ID(id uuid.UUID) predicate.Tag {
-	return predicate.Tag(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
+	return predicate.Tag(sql.FieldEQ(FieldID, id))
 }

 // IDEQ applies the EQ predicate on the ID field.
 func IDEQ(id uuid.UUID) predicate.Tag {
-	return predicate.Tag(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
+	return predicate.Tag(sql.FieldEQ(FieldID, id))
 }

 // IDNEQ applies the NEQ predicate on the ID field.
 func IDNEQ(id uuid.UUID) predicate.Tag {
-	return predicate.Tag(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldID), id))
-	})
+	return predicate.Tag(sql.FieldNEQ(FieldID, id))
 }

 // IDIn applies the In predicate on the ID field.
 func IDIn(ids ...uuid.UUID) predicate.Tag {
-	return predicate.Tag(func(s *sql.Selector) {
-		// if not arguments were provided, append the FALSE constants,
-		// since we can't apply "IN ()". This will make this predicate falsy.
-		if len(ids) == 0 {
-			s.Where(sql.False())
-			return
-		}
-		v := make([]interface{}, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.In(s.C(FieldID), v...))
-	})
+	return predicate.Tag(sql.FieldIn(FieldID, ids...))
 }

 // IDNotIn applies the NotIn predicate on the ID field.
 func IDNotIn(ids ...uuid.UUID) predicate.Tag {
-	return predicate.Tag(func(s *sql.Selector) {
-		// if not arguments were provided, append the FALSE constants,
-		// since we can't apply "IN ()". This will make this predicate falsy.
-		if len(ids) == 0 {
-			s.Where(sql.False())
-			return
-		}
-		v := make([]interface{}, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.NotIn(s.C(FieldID), v...))
-	})
+	return predicate.Tag(sql.FieldNotIn(FieldID, ids...))
 }

 // IDGT applies the GT predicate on the ID field.
 func IDGT(id uuid.UUID) predicate.Tag {
-	return predicate.Tag(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldID), id))
-	})
+	return predicate.Tag(sql.FieldGT(FieldID, id))
 }

 // IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Tag(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Tag(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Tag(sql.FieldLTE(FieldID, id)) } // UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ. func UUID(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldEQ(FieldUUID, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldEQ(FieldName, v)) } // UUIDEQ applies the EQ predicate on the "uuid" field. func UUIDEQ(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldEQ(FieldUUID, v)) } // UUIDNEQ applies the NEQ predicate on the "uuid" field. func UUIDNEQ(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldNEQ(FieldUUID, v)) } // UUIDIn applies the In predicate on the "uuid" field. func UUIDIn(vs ...uuid.UUID) predicate.Tag { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Tag(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldUUID), v...)) - }) + return predicate.Tag(sql.FieldIn(FieldUUID, vs...)) } // UUIDNotIn applies the NotIn predicate on the "uuid" field. func UUIDNotIn(vs ...uuid.UUID) predicate.Tag { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Tag(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldUUID), v...)) - }) + return predicate.Tag(sql.FieldNotIn(FieldUUID, vs...)) } // UUIDGT applies the GT predicate on the "uuid" field. func UUIDGT(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldGT(FieldUUID, v)) } // UUIDGTE applies the GTE predicate on the "uuid" field. func UUIDGTE(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldGTE(FieldUUID, v)) } // UUIDLT applies the LT predicate on the "uuid" field. func UUIDLT(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldLT(FieldUUID, v)) } // UUIDLTE applies the LTE predicate on the "uuid" field. 
func UUIDLTE(v uuid.UUID) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUUID), v)) - }) + return predicate.Tag(sql.FieldLTE(FieldUUID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Tag { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Tag(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Tag(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Tag { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Tag(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Tag(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. 
func NameEqualFold(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Tag(sql.FieldContainsFold(FieldName, v)) } // And groups predicates with the AND operator between them. func And(predicates ...predicate.Tag) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Tag(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Tag) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Tag(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Tag) predicate.Tag { - return predicate.Tag(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Tag(sql.NotPredicates(p)) } diff --git a/ent/tag_create.go b/ent/tag_create.go index 9077852f..e32fc262 100755 --- a/ent/tag_create.go +++ b/ent/tag_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -59,44 +59,8 @@ func (tc *TagCreate) Mutation() *TagMutation { // Save creates the Tag in the database. func (tc *TagCreate) Save(ctx context.Context) (*Tag, error) { - var ( - err error - node *Tag - ) tc.defaults() - if len(tc.hooks) == 0 { - if err = tc.check(); err != nil { - return nil, err - } - node, err = tc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TagMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tc.check(); err != nil { - return nil, err - } - tc.mutation = mutation - if node, err = tc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(tc.hooks) - 1; i >= 0; i-- { - if tc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, tc.sqlSave, tc.mutation, tc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -144,10 +108,13 @@ func (tc *TagCreate) check() error { } func (tc *TagCreate) sqlSave(ctx context.Context) (*Tag, error) { + if err := tc.check(); err != nil { + return nil, err + } _node, _spec := tc.createSpec() if err := sqlgraph.CreateNode(ctx, tc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -158,46 +125,30 @@ func (tc *TagCreate) sqlSave(ctx context.Context) (*Tag, error) { return nil, err } } + tc.mutation.id = &_node.ID + tc.mutation.done = true return _node, nil } func (tc *TagCreate) createSpec() (*Tag, *sqlgraph.CreateSpec) { var ( _node = &Tag{config: tc.config} - _spec = &sqlgraph.CreateSpec{ - Table: tag.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(tag.Table, sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID)) ) if id, ok := tc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := tc.mutation.UUID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Value: value, - Column: tag.FieldUUID, - }) + _spec.SetField(tag.FieldUUID, field.TypeUUID, value) _node.UUID = value } if value, ok := tc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: tag.FieldName, - }) + _spec.SetField(tag.FieldName, field.TypeString, value) _node.Name = value } if value, ok := tc.mutation.Description(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: tag.FieldDescription, - }) + _spec.SetField(tag.FieldDescription, field.TypeJSON, value) _node.Description = value } return _node, _spec @@ -206,11 +157,15 @@ func (tc *TagCreate) createSpec() (*Tag, *sqlgraph.CreateSpec) { // TagCreateBulk is the builder for creating many Tag entities in bulk. type TagCreateBulk struct { config + err error builders []*TagCreate } // Save creates the Tag entities in the database. func (tcb *TagCreateBulk) Save(ctx context.Context) ([]*Tag, error) { + if tcb.err != nil { + return nil, tcb.err + } specs := make([]*sqlgraph.CreateSpec, len(tcb.builders)) nodes := make([]*Tag, len(tcb.builders)) mutators := make([]Mutator, len(tcb.builders)) @@ -227,8 +182,8 @@ func (tcb *TagCreateBulk) Save(ctx context.Context) ([]*Tag, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, tcb.builders[i+1].mutation) } else { @@ -236,7 +191,7 @@ func (tcb *TagCreateBulk) Save(ctx context.Context) ([]*Tag, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, tcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/tag_delete.go b/ent/tag_delete.go index efae6ddc..c77e41d9 100755 --- a/ent/tag_delete.go +++ b/ent/tag_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (td *TagDelete) Where(ps ...predicate.Tag) *TagDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (td *TagDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(td.hooks) == 0 { - affected, err = td.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TagMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - td.mutation = mutation - affected, err = td.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(td.hooks) - 1; i >= 0; i-- { - if td.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = td.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, td.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, td.sqlExec, td.mutation, td.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (td *TagDelete) ExecX(ctx context.Context) int { } func (td *TagDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: tag.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(tag.Table, sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID)) if ps := td.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (td *TagDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, td.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, td.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + td.mutation.done = true + return affected, err } // TagDeleteOne is the builder for deleting a single Tag entity. @@ -92,6 +61,12 @@ type TagDeleteOne struct { td *TagDelete } +// Where appends a list predicates to the TagDelete builder. +func (tdo *TagDeleteOne) Where(ps ...predicate.Tag) *TagDeleteOne { + tdo.td.mutation.Where(ps...) + return tdo +} + // Exec executes the deletion query. func (tdo *TagDeleteOne) Exec(ctx context.Context) error { n, err := tdo.td.Exec(ctx) @@ -107,5 +82,7 @@ func (tdo *TagDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (tdo *TagDeleteOne) ExecX(ctx context.Context) { - tdo.td.ExecX(ctx) + if err := tdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/tag_query.go b/ent/tag_query.go index 0fb63fee..b319a55f 100755 --- a/ent/tag_query.go +++ b/ent/tag_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -19,13 +18,13 @@ import ( // TagQuery is the builder for querying Tag entities. type TagQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []tag.OrderOption + inters []Interceptor predicates []predicate.Tag withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Tag) error // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -37,27 +36,27 @@ func (tq *TagQuery) Where(ps ...predicate.Tag) *TagQuery { return tq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (tq *TagQuery) Limit(limit int) *TagQuery { - tq.limit = &limit + tq.ctx.Limit = &limit return tq } -// Offset adds an offset step to the query. +// Offset to start from. func (tq *TagQuery) Offset(offset int) *TagQuery { - tq.offset = &offset + tq.ctx.Offset = &offset return tq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (tq *TagQuery) Unique(unique bool) *TagQuery { - tq.unique = &unique + tq.ctx.Unique = &unique return tq } -// Order adds an order step to the query. -func (tq *TagQuery) Order(o ...OrderFunc) *TagQuery { +// Order specifies how the records should be ordered. +func (tq *TagQuery) Order(o ...tag.OrderOption) *TagQuery { tq.order = append(tq.order, o...) return tq } @@ -65,7 +64,7 @@ func (tq *TagQuery) Order(o ...OrderFunc) *TagQuery { // First returns the first Tag entity from the query. // Returns a *NotFoundError when no Tag was found. func (tq *TagQuery) First(ctx context.Context) (*Tag, error) { - nodes, err := tq.Limit(1).All(ctx) + nodes, err := tq.Limit(1).All(setContextOp(ctx, tq.ctx, "First")) if err != nil { return nil, err } @@ -88,7 +87,7 @@ func (tq *TagQuery) FirstX(ctx context.Context) *Tag { // Returns a *NotFoundError when no Tag ID was found. func (tq *TagQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = tq.Limit(1).IDs(ctx); err != nil { + if ids, err = tq.Limit(1).IDs(setContextOp(ctx, tq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -111,7 +110,7 @@ func (tq *TagQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Tag entity is found. // Returns a *NotFoundError when no Tag entities are found. func (tq *TagQuery) Only(ctx context.Context) (*Tag, error) { - nodes, err := tq.Limit(2).All(ctx) + nodes, err := tq.Limit(2).All(setContextOp(ctx, tq.ctx, "Only")) if err != nil { return nil, err } @@ -139,7 +138,7 @@ func (tq *TagQuery) OnlyX(ctx context.Context) *Tag { // Returns a *NotFoundError when no entities are found. func (tq *TagQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = tq.Limit(2).IDs(ctx); err != nil { + if ids, err = tq.Limit(2).IDs(setContextOp(ctx, tq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -164,10 +163,12 @@ func (tq *TagQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Tags. func (tq *TagQuery) All(ctx context.Context) ([]*Tag, error) { + ctx = setContextOp(ctx, tq.ctx, "All") if err := tq.prepareQuery(ctx); err != nil { return nil, err } - return tq.sqlAll(ctx) + qr := querierAll[[]*Tag, *TagQuery]() + return withInterceptors[[]*Tag](ctx, tq, qr, tq.inters) } // AllX is like All, but panics if an error occurs. @@ -180,9 +181,12 @@ func (tq *TagQuery) AllX(ctx context.Context) []*Tag { } // IDs executes the query and returns a list of Tag IDs. 
-func (tq *TagQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := tq.Select(tag.FieldID).Scan(ctx, &ids); err != nil { +func (tq *TagQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if tq.ctx.Unique == nil && tq.path != nil { + tq.Unique(true) + } + ctx = setContextOp(ctx, tq.ctx, "IDs") + if err = tq.Select(tag.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -199,10 +203,11 @@ func (tq *TagQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (tq *TagQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, tq.ctx, "Count") if err := tq.prepareQuery(ctx); err != nil { return 0, err } - return tq.sqlCount(ctx) + return withInterceptors[int](ctx, tq, querierCount[*TagQuery](), tq.inters) } // CountX is like Count, but panics if an error occurs. @@ -216,10 +221,15 @@ func (tq *TagQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (tq *TagQuery) Exist(ctx context.Context) (bool, error) { - if err := tq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, tq.ctx, "Exist") + switch _, err := tq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return tq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -239,14 +249,13 @@ func (tq *TagQuery) Clone() *TagQuery { } return &TagQuery{ config: tq.config, - limit: tq.limit, - offset: tq.offset, - order: append([]OrderFunc{}, tq.order...), + ctx: tq.ctx.Clone(), + order: append([]tag.OrderOption{}, tq.order...), + inters: append([]Interceptor{}, tq.inters...), predicates: append([]predicate.Tag{}, tq.predicates...), // clone intermediate query. - sql: tq.sql.Clone(), - path: tq.path, - unique: tq.unique, + sql: tq.sql.Clone(), + path: tq.path, } } @@ -264,17 +273,13 @@ func (tq *TagQuery) Clone() *TagQuery { // GroupBy(tag.FieldUUID). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (tq *TagQuery) GroupBy(field string, fields ...string) *TagGroupBy { - group := &TagGroupBy{config: tq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := tq.prepareQuery(ctx); err != nil { - return nil, err - } - return tq.sqlQuery(ctx), nil - } - return group + tq.ctx.Fields = append([]string{field}, fields...) + grbuild := &TagGroupBy{build: tq} + grbuild.flds = &tq.ctx.Fields + grbuild.label = tag.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -289,14 +294,31 @@ func (tq *TagQuery) GroupBy(field string, fields ...string) *TagGroupBy { // client.Tag.Query(). // Select(tag.FieldUUID). // Scan(ctx, &v) -// func (tq *TagQuery) Select(fields ...string) *TagSelect { - tq.fields = append(tq.fields, fields...) - return &TagSelect{TagQuery: tq} + tq.ctx.Fields = append(tq.ctx.Fields, fields...) + sbuild := &TagSelect{TagQuery: tq} + sbuild.label = tag.Label + sbuild.flds, sbuild.scan = &tq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a TagSelect configured with the given aggregations. +func (tq *TagQuery) Aggregate(fns ...AggregateFunc) *TagSelect { + return tq.Select().Aggregate(fns...) 
} func (tq *TagQuery) prepareQuery(ctx context.Context) error { - for _, f := range tq.fields { + for _, inter := range tq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, tq); err != nil { + return err + } + } + } + for _, f := range tq.ctx.Fields { if !tag.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -311,7 +333,7 @@ func (tq *TagQuery) prepareQuery(ctx context.Context) error { return nil } -func (tq *TagQuery) sqlAll(ctx context.Context) ([]*Tag, error) { +func (tq *TagQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Tag, error) { var ( nodes = []*Tag{} withFKs = tq.withFKs @@ -320,61 +342,55 @@ func (tq *TagQuery) sqlAll(ctx context.Context) ([]*Tag, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, tag.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Tag).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Tag{config: tq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] return node.assignValues(columns, values) } + if len(tq.modifiers) > 0 { + _spec.Modifiers = tq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, tq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } + for i := range tq.loadTotal { + if err := tq.loadTotal[i](ctx, nodes); err != nil { + return nil, err + } + } return nodes, nil } func (tq *TagQuery) sqlCount(ctx context.Context) (int, error) { _spec := tq.querySpec() - _spec.Node.Columns = tq.fields - if len(tq.fields) > 0 { - _spec.Unique = tq.unique != nil && *tq.unique + if len(tq.modifiers) > 0 { + _spec.Modifiers = tq.modifiers } - return sqlgraph.CountNodes(ctx, tq.driver, _spec) -} - -func (tq *TagQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := tq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = tq.ctx.Fields + if len(tq.ctx.Fields) > 0 { + _spec.Unique = tq.ctx.Unique != nil && *tq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, tq.driver, _spec) } func (tq *TagQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: tag.Table, - Columns: tag.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, - }, - From: tq.sql, - Unique: true, - } - if unique := tq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(tag.Table, tag.Columns, sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID)) + _spec.From = tq.sql + if unique := tq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if tq.path != nil { + _spec.Unique = true } - if fields := tq.fields; len(fields) > 0 { + if fields := tq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, tag.FieldID) for i := range fields { @@ -390,10 +406,10 @@ func (tq *TagQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := tq.limit; limit != nil { + if limit := 
tq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := tq.offset; offset != nil { + if offset := tq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := tq.order; len(ps) > 0 { @@ -409,7 +425,7 @@ func (tq *TagQuery) querySpec() *sqlgraph.QuerySpec { func (tq *TagQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(tq.driver.Dialect()) t1 := builder.Table(tag.Table) - columns := tq.fields + columns := tq.ctx.Fields if len(columns) == 0 { columns = tag.Columns } @@ -418,7 +434,7 @@ func (tq *TagQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = tq.sql selector.Select(selector.Columns(columns...)...) } - if tq.unique != nil && *tq.unique { + if tq.ctx.Unique != nil && *tq.ctx.Unique { selector.Distinct() } for _, p := range tq.predicates { @@ -427,12 +443,12 @@ func (tq *TagQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range tq.order { p(selector) } - if offset := tq.offset; offset != nil { + if offset := tq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := tq.limit; limit != nil { + if limit := tq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -440,12 +456,8 @@ func (tq *TagQuery) sqlQuery(ctx context.Context) *sql.Selector { // TagGroupBy is the group-by builder for Tag entities. type TagGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *TagQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -454,471 +466,77 @@ func (tgb *TagGroupBy) Aggregate(fns ...AggregateFunc) *TagGroupBy { return tgb } -// Scan applies the group-by query and scans the result into the given value. -func (tgb *TagGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := tgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (tgb *TagGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, tgb.build.ctx, "GroupBy") + if err := tgb.build.prepareQuery(ctx); err != nil { return err } - tgb.sql = query - return tgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (tgb *TagGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := tgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TagGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TagGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (tgb *TagGroupBy) StringsX(ctx context.Context) []string { - v, err := tgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (tgb *TagGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = tgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (tgb *TagGroupBy) StringX(ctx context.Context) string { - v, err := tgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TagGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TagGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (tgb *TagGroupBy) IntsX(ctx context.Context) []int { - v, err := tgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TagGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = tgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (tgb *TagGroupBy) IntX(ctx context.Context) int { - v, err := tgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TagGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TagGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (tgb *TagGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := tgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TagGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = tgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (tgb *TagGroupBy) Float64X(ctx context.Context) float64 { - v, err := tgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (tgb *TagGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TagGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (tgb *TagGroupBy) BoolsX(ctx context.Context) []bool { - v, err := tgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TagGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = tgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagGroupBy.Bools returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*TagQuery, *TagGroupBy](ctx, tgb.build, tgb, tgb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (tgb *TagGroupBy) BoolX(ctx context.Context) bool { - v, err := tgb.Bool(ctx) - if err != nil { - panic(err) +func (tgb *TagGroupBy) sqlScan(ctx context.Context, root *TagQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(tgb.fns)) + for _, fn := range tgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (tgb *TagGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range tgb.fields { - if !tag.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*tgb.flds)+len(tgb.fns)) + for _, f := range *tgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := tgb.sqlQuery() + selector.GroupBy(selector.Columns(*tgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := tgb.driver.Query(ctx, query, args, rows); err != nil { + if err := tgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (tgb *TagGroupBy) sqlQuery() *sql.Selector { - selector := tgb.sql.Select() - aggregation := make([]string, 0, len(tgb.fns)) - for _, fn := range tgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(tgb.fields)+len(tgb.fns)) - for _, f := range tgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(tgb.fields...)...) -} - // TagSelect is the builder for selecting fields of Tag entities. type TagSelect struct { *TagQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ts *TagSelect) Aggregate(fns ...AggregateFunc) *TagSelect { + ts.fns = append(ts.fns, fns...) + return ts } // Scan applies the selector query and scans the result into the given value. 
-func (ts *TagSelect) Scan(ctx context.Context, v interface{}) error { +func (ts *TagSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ts.ctx, "Select") if err := ts.prepareQuery(ctx); err != nil { return err } - ts.sql = ts.TagQuery.sqlQuery(ctx) - return ts.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ts *TagSelect) ScanX(ctx context.Context, v interface{}) { - if err := ts.Scan(ctx, v); err != nil { - panic(err) - } + return scanWithInterceptors[*TagQuery, *TagSelect](ctx, ts.TagQuery, ts, ts.inters, v) } -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Strings(ctx context.Context) ([]string, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TagSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ts *TagSelect) StringsX(ctx context.Context) []string { - v, err := ts.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ts.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ts *TagSelect) StringX(ctx context.Context) string { - v, err := ts.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Ints(ctx context.Context) ([]int, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TagSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ts *TagSelect) IntsX(ctx context.Context) []int { - v, err := ts.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ts.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ts *TagSelect) IntX(ctx context.Context) int { - v, err := ts.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TagSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. 
-func (ts *TagSelect) Float64sX(ctx context.Context) []float64 { - v, err := ts.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ts.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ts *TagSelect) Float64X(ctx context.Context) float64 { - v, err := ts.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TagSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ts *TagSelect) BoolsX(ctx context.Context) []bool { - v, err := ts.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ts *TagSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ts.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{tag.Label} - default: - err = fmt.Errorf("ent: TagSelect.Bools returned %d results when one was expected", len(v)) +func (ts *TagSelect) sqlScan(ctx context.Context, root *TagQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ts.fns)) + for _, fn := range ts.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ts *TagSelect) BoolX(ctx context.Context) bool { - v, err := ts.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ts.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ts *TagSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ts.sql.Query() + query, args := selector.Query() if err := ts.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/tag_update.go b/ent/tag_update.go index 067b741d..5ac18e7e 100755 --- a/ent/tag_update.go +++ b/ent/tag_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -34,12 +34,28 @@ func (tu *TagUpdate) SetUUID(u uuid.UUID) *TagUpdate { return tu } +// SetNillableUUID sets the "uuid" field if the given value is not nil. +func (tu *TagUpdate) SetNillableUUID(u *uuid.UUID) *TagUpdate { + if u != nil { + tu.SetUUID(*u) + } + return tu +} + // SetName sets the "name" field. func (tu *TagUpdate) SetName(s string) *TagUpdate { tu.mutation.SetName(s) return tu } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (tu *TagUpdate) SetNillableName(s *string) *TagUpdate { + if s != nil { + tu.SetName(*s) + } + return tu +} + // SetDescription sets the "description" field. func (tu *TagUpdate) SetDescription(m map[string]string) *TagUpdate { tu.mutation.SetDescription(m) @@ -53,34 +69,7 @@ func (tu *TagUpdate) Mutation() *TagMutation { // Save executes the query and returns the number of nodes affected by the update operation. func (tu *TagUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(tu.hooks) == 0 { - affected, err = tu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TagMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - tu.mutation = mutation - affected, err = tu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(tu.hooks) - 1; i >= 0; i-- { - if tu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, tu.sqlSave, tu.mutation, tu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -106,16 +95,7 @@ func (tu *TagUpdate) ExecX(ctx context.Context) { } func (tu *TagUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: tag.Table, - Columns: tag.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(tag.Table, tag.Columns, sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID)) if ps := tu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -124,34 +104,23 @@ func (tu *TagUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := tu.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Value: value, - Column: tag.FieldUUID, - }) + _spec.SetField(tag.FieldUUID, field.TypeUUID, value) } if value, ok := tu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: tag.FieldName, - }) + _spec.SetField(tag.FieldName, field.TypeString, value) } if value, ok := tu.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: tag.FieldDescription, - }) + _spec.SetField(tag.FieldDescription, field.TypeJSON, value) } if n, err = sqlgraph.UpdateNodes(ctx, tu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{tag.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + tu.mutation.done = true return n, nil } @@ -169,12 +138,28 @@ func (tuo *TagUpdateOne) SetUUID(u uuid.UUID) *TagUpdateOne { return tuo } +// SetNillableUUID sets the "uuid" field if the given value is not nil. +func (tuo *TagUpdateOne) SetNillableUUID(u *uuid.UUID) *TagUpdateOne { + if u != nil { + tuo.SetUUID(*u) + } + return tuo +} + // SetName sets the "name" field. func (tuo *TagUpdateOne) SetName(s string) *TagUpdateOne { tuo.mutation.SetName(s) return tuo } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (tuo *TagUpdateOne) SetNillableName(s *string) *TagUpdateOne { + if s != nil { + tuo.SetName(*s) + } + return tuo +} + // SetDescription sets the "description" field. func (tuo *TagUpdateOne) SetDescription(m map[string]string) *TagUpdateOne { tuo.mutation.SetDescription(m) @@ -186,6 +171,12 @@ func (tuo *TagUpdateOne) Mutation() *TagMutation { return tuo.mutation } +// Where appends a list predicates to the TagUpdate builder. +func (tuo *TagUpdateOne) Where(ps ...predicate.Tag) *TagUpdateOne { + tuo.mutation.Where(ps...) + return tuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (tuo *TagUpdateOne) Select(field string, fields ...string) *TagUpdateOne { @@ -195,34 +186,7 @@ func (tuo *TagUpdateOne) Select(field string, fields ...string) *TagUpdateOne { // Save executes the query and returns the updated Tag entity. func (tuo *TagUpdateOne) Save(ctx context.Context) (*Tag, error) { - var ( - err error - node *Tag - ) - if len(tuo.hooks) == 0 { - node, err = tuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TagMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - tuo.mutation = mutation - node, err = tuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(tuo.hooks) - 1; i >= 0; i-- { - if tuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, tuo.sqlSave, tuo.mutation, tuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -248,16 +212,7 @@ func (tuo *TagUpdateOne) ExecX(ctx context.Context) { } func (tuo *TagUpdateOne) sqlSave(ctx context.Context) (_node *Tag, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: tag.Table, - Columns: tag.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(tag.Table, tag.Columns, sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID)) id, ok := tuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Tag.id" for update`)} @@ -283,25 +238,13 @@ func (tuo *TagUpdateOne) sqlSave(ctx context.Context) (_node *Tag, err error) { } } if value, ok := tuo.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Value: value, - Column: tag.FieldUUID, - }) + _spec.SetField(tag.FieldUUID, field.TypeUUID, value) } if value, ok := tuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: tag.FieldName, - }) + _spec.SetField(tag.FieldName, field.TypeString, value) } if value, ok := tuo.mutation.Description(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: tag.FieldDescription, - }) + _spec.SetField(tag.FieldDescription, field.TypeJSON, value) } _node = &Tag{config: tuo.config} _spec.Assign = _node.assignValues @@ -310,9 +253,10 @@ func (tuo *TagUpdateOne) sqlSave(ctx context.Context) (_node *Tag, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{tag.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + tuo.mutation.done = true return _node, nil } diff --git a/ent/team.go b/ent/team.go index 50a631b7..7e5db572 100755 --- a/ent/team.go +++ b/ent/team.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/build" "github.com/gen0cide/laforge/ent/plan" @@ -28,6 +29,7 @@ type Team struct { // The values are being populated by the TeamQuery when eager-loading is set. Edges TeamEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // TeamToBuild holds the value of the TeamToBuild edge. HCLTeamToBuild *Build `json:"TeamToBuild,omitempty"` @@ -37,9 +39,10 @@ type Team struct { HCLTeamToProvisionedNetwork []*ProvisionedNetwork `json:"TeamToProvisionedNetwork,omitempty"` // TeamToPlan holds the value of the TeamToPlan edge. HCLTeamToPlan *Plan `json:"TeamToPlan,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ plan_plan_to_team *uuid.UUID team_team_to_build *uuid.UUID + selectValues sql.SelectValues } // TeamEdges holds the relations/edges for other nodes in the graph. @@ -55,6 +58,10 @@ type TeamEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [4]bool + // totalCount holds the count of the edges above. 
+ totalCount [4]map[string]int + + namedTeamToProvisionedNetwork map[string][]*ProvisionedNetwork } // TeamToBuildOrErr returns the TeamToBuild value or an error if the edge @@ -62,8 +69,7 @@ type TeamEdges struct { func (e TeamEdges) TeamToBuildOrErr() (*Build, error) { if e.loadedTypes[0] { if e.TeamToBuild == nil { - // The edge TeamToBuild was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: build.Label} } return e.TeamToBuild, nil @@ -76,8 +82,7 @@ func (e TeamEdges) TeamToBuildOrErr() (*Build, error) { func (e TeamEdges) TeamToStatusOrErr() (*Status, error) { if e.loadedTypes[1] { if e.TeamToStatus == nil { - // The edge TeamToStatus was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: status.Label} } return e.TeamToStatus, nil @@ -99,8 +104,7 @@ func (e TeamEdges) TeamToProvisionedNetworkOrErr() ([]*ProvisionedNetwork, error func (e TeamEdges) TeamToPlanOrErr() (*Plan, error) { if e.loadedTypes[3] { if e.TeamToPlan == nil { - // The edge TeamToPlan was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: plan.Label} } return e.TeamToPlan, nil @@ -109,8 +113,8 @@ func (e TeamEdges) TeamToPlanOrErr() (*Plan, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Team) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Team) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case team.FieldVars: @@ -124,7 +128,7 @@ func (*Team) scanValues(columns []string) ([]interface{}, error) { case team.ForeignKeys[1]: // team_team_to_build values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Team", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -132,7 +136,7 @@ func (*Team) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Team fields. -func (t *Team) assignValues(columns []string, values []interface{}) error { +func (t *Team) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -172,46 +176,54 @@ func (t *Team) assignValues(columns []string, values []interface{}) error { t.team_team_to_build = new(uuid.UUID) *t.team_team_to_build = *value.S.(*uuid.UUID) } + default: + t.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Team. +// This includes values selected through modifiers, order, etc. +func (t *Team) Value(name string) (ent.Value, error) { + return t.selectValues.Get(name) +} + // QueryTeamToBuild queries the "TeamToBuild" edge of the Team entity. func (t *Team) QueryTeamToBuild() *BuildQuery { - return (&TeamClient{config: t.config}).QueryTeamToBuild(t) + return NewTeamClient(t.config).QueryTeamToBuild(t) } // QueryTeamToStatus queries the "TeamToStatus" edge of the Team entity. 
func (t *Team) QueryTeamToStatus() *StatusQuery { - return (&TeamClient{config: t.config}).QueryTeamToStatus(t) + return NewTeamClient(t.config).QueryTeamToStatus(t) } // QueryTeamToProvisionedNetwork queries the "TeamToProvisionedNetwork" edge of the Team entity. func (t *Team) QueryTeamToProvisionedNetwork() *ProvisionedNetworkQuery { - return (&TeamClient{config: t.config}).QueryTeamToProvisionedNetwork(t) + return NewTeamClient(t.config).QueryTeamToProvisionedNetwork(t) } // QueryTeamToPlan queries the "TeamToPlan" edge of the Team entity. func (t *Team) QueryTeamToPlan() *PlanQuery { - return (&TeamClient{config: t.config}).QueryTeamToPlan(t) + return NewTeamClient(t.config).QueryTeamToPlan(t) } // Update returns a builder for updating this Team. // Note that you need to call Team.Unwrap() before calling this method if this Team // was returned from a transaction, and the transaction was committed or rolled back. func (t *Team) Update() *TeamUpdateOne { - return (&TeamClient{config: t.config}).UpdateOne(t) + return NewTeamClient(t.config).UpdateOne(t) } // Unwrap unwraps the Team entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (t *Team) Unwrap() *Team { - tx, ok := t.config.driver.(*txDriver) + _tx, ok := t.config.driver.(*txDriver) if !ok { panic("ent: Team is not a transactional entity") } - t.config.driver = tx.drv + t.config.driver = _tx.drv return t } @@ -219,20 +231,39 @@ func (t *Team) Unwrap() *Team { func (t *Team) String() string { var builder strings.Builder builder.WriteString("Team(") - builder.WriteString(fmt.Sprintf("id=%v", t.ID)) - builder.WriteString(", team_number=") + builder.WriteString(fmt.Sprintf("id=%v, ", t.ID)) + builder.WriteString("team_number=") builder.WriteString(fmt.Sprintf("%v", t.TeamNumber)) - builder.WriteString(", vars=") + builder.WriteString(", ") + builder.WriteString("vars=") builder.WriteString(fmt.Sprintf("%v", t.Vars)) builder.WriteByte(')') return builder.String() } -// Teams is a parsable slice of Team. -type Teams []*Team +// NamedTeamToProvisionedNetwork returns the TeamToProvisionedNetwork named value or an error if the edge was not +// loaded in eager-loading with this name. +func (t *Team) NamedTeamToProvisionedNetwork(name string) ([]*ProvisionedNetwork, error) { + if t.Edges.namedTeamToProvisionedNetwork == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := t.Edges.namedTeamToProvisionedNetwork[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (t Teams) config(cfg config) { - for _i := range t { - t[_i].config = cfg +func (t *Team) appendNamedTeamToProvisionedNetwork(name string, edges ...*ProvisionedNetwork) { + if t.Edges.namedTeamToProvisionedNetwork == nil { + t.Edges.namedTeamToProvisionedNetwork = make(map[string][]*ProvisionedNetwork) + } + if len(edges) == 0 { + t.Edges.namedTeamToProvisionedNetwork[name] = []*ProvisionedNetwork{} + } else { + t.Edges.namedTeamToProvisionedNetwork[name] = append(t.Edges.namedTeamToProvisionedNetwork[name], edges...) } } + +// Teams is a parsable slice of Team. +type Teams []*Team diff --git a/ent/team/team.go b/ent/team/team.go index dba9277f..4b5289ce 100755 --- a/ent/team/team.go +++ b/ent/team/team.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. 
package team import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -88,3 +90,79 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Team queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByTeamNumber orders the results by the team_number field. +func ByTeamNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTeamNumber, opts...).ToFunc() +} + +// ByTeamToBuildField orders the results by TeamToBuild field. +func ByTeamToBuildField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newTeamToBuildStep(), sql.OrderByField(field, opts...)) + } +} + +// ByTeamToStatusField orders the results by TeamToStatus field. +func ByTeamToStatusField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newTeamToStatusStep(), sql.OrderByField(field, opts...)) + } +} + +// ByTeamToProvisionedNetworkCount orders the results by TeamToProvisionedNetwork count. +func ByTeamToProvisionedNetworkCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newTeamToProvisionedNetworkStep(), opts...) + } +} + +// ByTeamToProvisionedNetwork orders the results by TeamToProvisionedNetwork terms. +func ByTeamToProvisionedNetwork(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newTeamToProvisionedNetworkStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByTeamToPlanField orders the results by TeamToPlan field. +func ByTeamToPlanField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newTeamToPlanStep(), sql.OrderByField(field, opts...)) + } +} +func newTeamToBuildStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(TeamToBuildInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, TeamToBuildTable, TeamToBuildColumn), + ) +} +func newTeamToStatusStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(TeamToStatusInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, TeamToStatusTable, TeamToStatusColumn), + ) +} +func newTeamToProvisionedNetworkStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(TeamToProvisionedNetworkInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, TeamToProvisionedNetworkTable, TeamToProvisionedNetworkColumn), + ) +} +func newTeamToPlanStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(TeamToPlanInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, TeamToPlanTable, TeamToPlanColumn), + ) +} diff --git a/ent/team/where.go b/ent/team/where.go index 21ae6027..2beca98c 100755 --- a/ent/team/where.go +++ b/ent/team/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package team @@ -11,168 +11,92 @@ import ( // ID filters vertices based on their ID field. 
func ID(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Team(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Team(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Team(sql.FieldLTE(FieldID, id)) } // TeamNumber applies equality check predicate on the "team_number" field. It's identical to TeamNumberEQ. func TeamNumber(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldEQ(FieldTeamNumber, v)) } // TeamNumberEQ applies the EQ predicate on the "team_number" field. func TeamNumberEQ(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldEQ(FieldTeamNumber, v)) } // TeamNumberNEQ applies the NEQ predicate on the "team_number" field. func TeamNumberNEQ(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldNEQ(FieldTeamNumber, v)) } // TeamNumberIn applies the In predicate on the "team_number" field. 
func TeamNumberIn(vs ...int) predicate.Team { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Team(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldTeamNumber), v...)) - }) + return predicate.Team(sql.FieldIn(FieldTeamNumber, vs...)) } // TeamNumberNotIn applies the NotIn predicate on the "team_number" field. func TeamNumberNotIn(vs ...int) predicate.Team { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Team(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldTeamNumber), v...)) - }) + return predicate.Team(sql.FieldNotIn(FieldTeamNumber, vs...)) } // TeamNumberGT applies the GT predicate on the "team_number" field. func TeamNumberGT(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldGT(FieldTeamNumber, v)) } // TeamNumberGTE applies the GTE predicate on the "team_number" field. func TeamNumberGTE(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldGTE(FieldTeamNumber, v)) } // TeamNumberLT applies the LT predicate on the "team_number" field. func TeamNumberLT(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldLT(FieldTeamNumber, v)) } // TeamNumberLTE applies the LTE predicate on the "team_number" field. func TeamNumberLTE(v int) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTeamNumber), v)) - }) + return predicate.Team(sql.FieldLTE(FieldTeamNumber, v)) } // HasTeamToBuild applies the HasEdge predicate on the "TeamToBuild" edge. @@ -180,7 +104,6 @@ func HasTeamToBuild() predicate.Team { return predicate.Team(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToBuildTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, false, TeamToBuildTable, TeamToBuildColumn), ) sqlgraph.HasNeighbors(s, step) @@ -190,11 +113,7 @@ func HasTeamToBuild() predicate.Team { // HasTeamToBuildWith applies the HasEdge predicate on the "TeamToBuild" edge with a given conditions (other predicates). 
func HasTeamToBuildWith(preds ...predicate.Build) predicate.Team { return predicate.Team(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToBuildInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, TeamToBuildTable, TeamToBuildColumn), - ) + step := newTeamToBuildStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -208,7 +127,6 @@ func HasTeamToStatus() predicate.Team { return predicate.Team(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToStatusTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, false, TeamToStatusTable, TeamToStatusColumn), ) sqlgraph.HasNeighbors(s, step) @@ -218,11 +136,7 @@ func HasTeamToStatus() predicate.Team { // HasTeamToStatusWith applies the HasEdge predicate on the "TeamToStatus" edge with a given conditions (other predicates). func HasTeamToStatusWith(preds ...predicate.Status) predicate.Team { return predicate.Team(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToStatusInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, TeamToStatusTable, TeamToStatusColumn), - ) + step := newTeamToStatusStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -236,7 +150,6 @@ func HasTeamToProvisionedNetwork() predicate.Team { return predicate.Team(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToProvisionedNetworkTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, true, TeamToProvisionedNetworkTable, TeamToProvisionedNetworkColumn), ) sqlgraph.HasNeighbors(s, step) @@ -246,11 +159,7 @@ func HasTeamToProvisionedNetwork() predicate.Team { // HasTeamToProvisionedNetworkWith applies the HasEdge predicate on the "TeamToProvisionedNetwork" edge with a given conditions (other predicates). func HasTeamToProvisionedNetworkWith(preds ...predicate.ProvisionedNetwork) predicate.Team { return predicate.Team(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToProvisionedNetworkInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, TeamToProvisionedNetworkTable, TeamToProvisionedNetworkColumn), - ) + step := newTeamToProvisionedNetworkStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -264,7 +173,6 @@ func HasTeamToPlan() predicate.Team { return predicate.Team(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToPlanTable, FieldID), sqlgraph.Edge(sqlgraph.O2O, true, TeamToPlanTable, TeamToPlanColumn), ) sqlgraph.HasNeighbors(s, step) @@ -274,11 +182,7 @@ func HasTeamToPlan() predicate.Team { // HasTeamToPlanWith applies the HasEdge predicate on the "TeamToPlan" edge with a given conditions (other predicates). func HasTeamToPlanWith(preds ...predicate.Plan) predicate.Team { return predicate.Team(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(TeamToPlanInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, TeamToPlanTable, TeamToPlanColumn), - ) + step := newTeamToPlanStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -289,32 +193,15 @@ func HasTeamToPlanWith(preds ...predicate.Plan) predicate.Team { // And groups predicates with the AND operator between them. 
func And(predicates ...predicate.Team) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Team(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Team) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Team(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Team) predicate.Team { - return predicate.Team(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Team(sql.NotPredicates(p)) } diff --git a/ent/team_create.go b/ent/team_create.go index f5301507..2beb523a 100755 --- a/ent/team_create.go +++ b/ent/team_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -121,44 +121,8 @@ func (tc *TeamCreate) Mutation() *TeamMutation { // Save creates the Team in the database. func (tc *TeamCreate) Save(ctx context.Context) (*Team, error) { - var ( - err error - node *Team - ) tc.defaults() - if len(tc.hooks) == 0 { - if err = tc.check(); err != nil { - return nil, err - } - node, err = tc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TeamMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tc.check(); err != nil { - return nil, err - } - tc.mutation = mutation - if node, err = tc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(tc.hooks) - 1; i >= 0; i-- { - if tc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, tc.sqlSave, tc.mutation, tc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -206,10 +170,13 @@ func (tc *TeamCreate) check() error { } func (tc *TeamCreate) sqlSave(ctx context.Context) (*Team, error) { + if err := tc.check(); err != nil { + return nil, err + } _node, _spec := tc.createSpec() if err := sqlgraph.CreateNode(ctx, tc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -220,38 +187,26 @@ func (tc *TeamCreate) sqlSave(ctx context.Context) (*Team, error) { return nil, err } } + tc.mutation.id = &_node.ID + tc.mutation.done = true return _node, nil } func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { var ( _node = &Team{config: tc.config} - _spec = &sqlgraph.CreateSpec{ - Table: team.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(team.Table, sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID)) ) if id, ok := tc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := tc.mutation.TeamNumber(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: team.FieldTeamNumber, - }) + _spec.SetField(team.FieldTeamNumber, field.TypeInt, value) _node.TeamNumber = value } if value, ok := tc.mutation.Vars(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: team.FieldVars, - }) + _spec.SetField(team.FieldVars, field.TypeJSON, value) _node.Vars = value } if nodes := tc.mutation.TeamToBuildIDs(); len(nodes) > 0 { @@ -262,10 +217,7 @@ func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { Columns: []string{team.TeamToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -282,10 +234,7 @@ func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { Columns: []string{team.TeamToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -301,10 +250,7 @@ func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { Columns: []string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -320,10 +266,7 @@ func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { Columns: []string{team.TeamToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -338,11 +281,15 @@ func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { // TeamCreateBulk is the builder for creating many Team entities in bulk. type TeamCreateBulk struct { config + err error builders []*TeamCreate } // Save creates the Team entities in the database. 
func (tcb *TeamCreateBulk) Save(ctx context.Context) ([]*Team, error) { + if tcb.err != nil { + return nil, tcb.err + } specs := make([]*sqlgraph.CreateSpec, len(tcb.builders)) nodes := make([]*Team, len(tcb.builders)) mutators := make([]Mutator, len(tcb.builders)) @@ -359,8 +306,8 @@ func (tcb *TeamCreateBulk) Save(ctx context.Context) ([]*Team, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, tcb.builders[i+1].mutation) } else { @@ -368,7 +315,7 @@ func (tcb *TeamCreateBulk) Save(ctx context.Context) ([]*Team, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, tcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/team_delete.go b/ent/team_delete.go index 9574bad5..ce4be9c9 100755 --- a/ent/team_delete.go +++ b/ent/team_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (td *TeamDelete) Where(ps ...predicate.Team) *TeamDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (td *TeamDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(td.hooks) == 0 { - affected, err = td.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TeamMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - td.mutation = mutation - affected, err = td.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(td.hooks) - 1; i >= 0; i-- { - if td.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = td.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, td.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, td.sqlExec, td.mutation, td.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (td *TeamDelete) ExecX(ctx context.Context) int { } func (td *TeamDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: team.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(team.Table, sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID)) if ps := td.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (td *TeamDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, td.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, td.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + td.mutation.done = true + return affected, err } // TeamDeleteOne is the builder for deleting a single Team entity. @@ -92,6 +61,12 @@ type TeamDeleteOne struct { td *TeamDelete } +// Where appends a list predicates to the TeamDelete builder. +func (tdo *TeamDeleteOne) Where(ps ...predicate.Team) *TeamDeleteOne { + tdo.td.mutation.Where(ps...) 
+ return tdo +} + // Exec executes the deletion query. func (tdo *TeamDeleteOne) Exec(ctx context.Context) error { n, err := tdo.td.Exec(ctx) @@ -107,5 +82,7 @@ func (tdo *TeamDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (tdo *TeamDeleteOne) ExecX(ctx context.Context) { - tdo.td.ExecX(ctx) + if err := tdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/team_query.go b/ent/team_query.go index 2c679a5e..e12c2b2b 100755 --- a/ent/team_query.go +++ b/ent/team_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -24,18 +23,18 @@ import ( // TeamQuery is the builder for querying Team entities. type TeamQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Team - // eager-loading edges. - withTeamToBuild *BuildQuery - withTeamToStatus *StatusQuery - withTeamToProvisionedNetwork *ProvisionedNetworkQuery - withTeamToPlan *PlanQuery - withFKs bool + ctx *QueryContext + order []team.OrderOption + inters []Interceptor + predicates []predicate.Team + withTeamToBuild *BuildQuery + withTeamToStatus *StatusQuery + withTeamToProvisionedNetwork *ProvisionedNetworkQuery + withTeamToPlan *PlanQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Team) error + withNamedTeamToProvisionedNetwork map[string]*ProvisionedNetworkQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -47,34 +46,34 @@ func (tq *TeamQuery) Where(ps ...predicate.Team) *TeamQuery { return tq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (tq *TeamQuery) Limit(limit int) *TeamQuery { - tq.limit = &limit + tq.ctx.Limit = &limit return tq } -// Offset adds an offset step to the query. +// Offset to start from. func (tq *TeamQuery) Offset(offset int) *TeamQuery { - tq.offset = &offset + tq.ctx.Offset = &offset return tq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (tq *TeamQuery) Unique(unique bool) *TeamQuery { - tq.unique = &unique + tq.ctx.Unique = &unique return tq } -// Order adds an order step to the query. -func (tq *TeamQuery) Order(o ...OrderFunc) *TeamQuery { +// Order specifies how the records should be ordered. +func (tq *TeamQuery) Order(o ...team.OrderOption) *TeamQuery { tq.order = append(tq.order, o...) return tq } // QueryTeamToBuild chains the current query on the "TeamToBuild" edge. func (tq *TeamQuery) QueryTeamToBuild() *BuildQuery { - query := &BuildQuery{config: tq.config} + query := (&BuildClient{config: tq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := tq.prepareQuery(ctx); err != nil { return nil, err @@ -96,7 +95,7 @@ func (tq *TeamQuery) QueryTeamToBuild() *BuildQuery { // QueryTeamToStatus chains the current query on the "TeamToStatus" edge. 
func (tq *TeamQuery) QueryTeamToStatus() *StatusQuery { - query := &StatusQuery{config: tq.config} + query := (&StatusClient{config: tq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := tq.prepareQuery(ctx); err != nil { return nil, err @@ -118,7 +117,7 @@ func (tq *TeamQuery) QueryTeamToStatus() *StatusQuery { // QueryTeamToProvisionedNetwork chains the current query on the "TeamToProvisionedNetwork" edge. func (tq *TeamQuery) QueryTeamToProvisionedNetwork() *ProvisionedNetworkQuery { - query := &ProvisionedNetworkQuery{config: tq.config} + query := (&ProvisionedNetworkClient{config: tq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := tq.prepareQuery(ctx); err != nil { return nil, err @@ -140,7 +139,7 @@ func (tq *TeamQuery) QueryTeamToProvisionedNetwork() *ProvisionedNetworkQuery { // QueryTeamToPlan chains the current query on the "TeamToPlan" edge. func (tq *TeamQuery) QueryTeamToPlan() *PlanQuery { - query := &PlanQuery{config: tq.config} + query := (&PlanClient{config: tq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := tq.prepareQuery(ctx); err != nil { return nil, err @@ -163,7 +162,7 @@ func (tq *TeamQuery) QueryTeamToPlan() *PlanQuery { // First returns the first Team entity from the query. // Returns a *NotFoundError when no Team was found. func (tq *TeamQuery) First(ctx context.Context) (*Team, error) { - nodes, err := tq.Limit(1).All(ctx) + nodes, err := tq.Limit(1).All(setContextOp(ctx, tq.ctx, "First")) if err != nil { return nil, err } @@ -186,7 +185,7 @@ func (tq *TeamQuery) FirstX(ctx context.Context) *Team { // Returns a *NotFoundError when no Team ID was found. func (tq *TeamQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = tq.Limit(1).IDs(ctx); err != nil { + if ids, err = tq.Limit(1).IDs(setContextOp(ctx, tq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -209,7 +208,7 @@ func (tq *TeamQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Team entity is found. // Returns a *NotFoundError when no Team entities are found. func (tq *TeamQuery) Only(ctx context.Context) (*Team, error) { - nodes, err := tq.Limit(2).All(ctx) + nodes, err := tq.Limit(2).All(setContextOp(ctx, tq.ctx, "Only")) if err != nil { return nil, err } @@ -237,7 +236,7 @@ func (tq *TeamQuery) OnlyX(ctx context.Context) *Team { // Returns a *NotFoundError when no entities are found. func (tq *TeamQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = tq.Limit(2).IDs(ctx); err != nil { + if ids, err = tq.Limit(2).IDs(setContextOp(ctx, tq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -262,10 +261,12 @@ func (tq *TeamQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Teams. func (tq *TeamQuery) All(ctx context.Context) ([]*Team, error) { + ctx = setContextOp(ctx, tq.ctx, "All") if err := tq.prepareQuery(ctx); err != nil { return nil, err } - return tq.sqlAll(ctx) + qr := querierAll[[]*Team, *TeamQuery]() + return withInterceptors[[]*Team](ctx, tq, qr, tq.inters) } // AllX is like All, but panics if an error occurs. @@ -278,9 +279,12 @@ func (tq *TeamQuery) AllX(ctx context.Context) []*Team { } // IDs executes the query and returns a list of Team IDs. 
-func (tq *TeamQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := tq.Select(team.FieldID).Scan(ctx, &ids); err != nil { +func (tq *TeamQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if tq.ctx.Unique == nil && tq.path != nil { + tq.Unique(true) + } + ctx = setContextOp(ctx, tq.ctx, "IDs") + if err = tq.Select(team.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -297,10 +301,11 @@ func (tq *TeamQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (tq *TeamQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, tq.ctx, "Count") if err := tq.prepareQuery(ctx); err != nil { return 0, err } - return tq.sqlCount(ctx) + return withInterceptors[int](ctx, tq, querierCount[*TeamQuery](), tq.inters) } // CountX is like Count, but panics if an error occurs. @@ -314,10 +319,15 @@ func (tq *TeamQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (tq *TeamQuery) Exist(ctx context.Context) (bool, error) { - if err := tq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, tq.ctx, "Exist") + switch _, err := tq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return tq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -337,25 +347,24 @@ func (tq *TeamQuery) Clone() *TeamQuery { } return &TeamQuery{ config: tq.config, - limit: tq.limit, - offset: tq.offset, - order: append([]OrderFunc{}, tq.order...), + ctx: tq.ctx.Clone(), + order: append([]team.OrderOption{}, tq.order...), + inters: append([]Interceptor{}, tq.inters...), predicates: append([]predicate.Team{}, tq.predicates...), withTeamToBuild: tq.withTeamToBuild.Clone(), withTeamToStatus: tq.withTeamToStatus.Clone(), withTeamToProvisionedNetwork: tq.withTeamToProvisionedNetwork.Clone(), withTeamToPlan: tq.withTeamToPlan.Clone(), // clone intermediate query. - sql: tq.sql.Clone(), - path: tq.path, - unique: tq.unique, + sql: tq.sql.Clone(), + path: tq.path, } } // WithTeamToBuild tells the query-builder to eager-load the nodes that are connected to // the "TeamToBuild" edge. The optional arguments are used to configure the query builder of the edge. func (tq *TeamQuery) WithTeamToBuild(opts ...func(*BuildQuery)) *TeamQuery { - query := &BuildQuery{config: tq.config} + query := (&BuildClient{config: tq.config}).Query() for _, opt := range opts { opt(query) } @@ -366,7 +375,7 @@ func (tq *TeamQuery) WithTeamToBuild(opts ...func(*BuildQuery)) *TeamQuery { // WithTeamToStatus tells the query-builder to eager-load the nodes that are connected to // the "TeamToStatus" edge. The optional arguments are used to configure the query builder of the edge. func (tq *TeamQuery) WithTeamToStatus(opts ...func(*StatusQuery)) *TeamQuery { - query := &StatusQuery{config: tq.config} + query := (&StatusClient{config: tq.config}).Query() for _, opt := range opts { opt(query) } @@ -377,7 +386,7 @@ func (tq *TeamQuery) WithTeamToStatus(opts ...func(*StatusQuery)) *TeamQuery { // WithTeamToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to // the "TeamToProvisionedNetwork" edge. The optional arguments are used to configure the query builder of the edge. 
func (tq *TeamQuery) WithTeamToProvisionedNetwork(opts ...func(*ProvisionedNetworkQuery)) *TeamQuery { - query := &ProvisionedNetworkQuery{config: tq.config} + query := (&ProvisionedNetworkClient{config: tq.config}).Query() for _, opt := range opts { opt(query) } @@ -388,7 +397,7 @@ func (tq *TeamQuery) WithTeamToProvisionedNetwork(opts ...func(*ProvisionedNetwo // WithTeamToPlan tells the query-builder to eager-load the nodes that are connected to // the "TeamToPlan" edge. The optional arguments are used to configure the query builder of the edge. func (tq *TeamQuery) WithTeamToPlan(opts ...func(*PlanQuery)) *TeamQuery { - query := &PlanQuery{config: tq.config} + query := (&PlanClient{config: tq.config}).Query() for _, opt := range opts { opt(query) } @@ -410,17 +419,13 @@ func (tq *TeamQuery) WithTeamToPlan(opts ...func(*PlanQuery)) *TeamQuery { // GroupBy(team.FieldTeamNumber). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (tq *TeamQuery) GroupBy(field string, fields ...string) *TeamGroupBy { - group := &TeamGroupBy{config: tq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := tq.prepareQuery(ctx); err != nil { - return nil, err - } - return tq.sqlQuery(ctx), nil - } - return group + tq.ctx.Fields = append([]string{field}, fields...) + grbuild := &TeamGroupBy{build: tq} + grbuild.flds = &tq.ctx.Fields + grbuild.label = team.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -435,14 +440,31 @@ func (tq *TeamQuery) GroupBy(field string, fields ...string) *TeamGroupBy { // client.Team.Query(). // Select(team.FieldTeamNumber). // Scan(ctx, &v) -// func (tq *TeamQuery) Select(fields ...string) *TeamSelect { - tq.fields = append(tq.fields, fields...) - return &TeamSelect{TeamQuery: tq} + tq.ctx.Fields = append(tq.ctx.Fields, fields...) + sbuild := &TeamSelect{TeamQuery: tq} + sbuild.label = team.Label + sbuild.flds, sbuild.scan = &tq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a TeamSelect configured with the given aggregations. +func (tq *TeamQuery) Aggregate(fns ...AggregateFunc) *TeamSelect { + return tq.Select().Aggregate(fns...) } func (tq *TeamQuery) prepareQuery(ctx context.Context) error { - for _, f := range tq.fields { + for _, inter := range tq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, tq); err != nil { + return err + } + } + } + for _, f := range tq.ctx.Fields { if !team.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -457,7 +479,7 @@ func (tq *TeamQuery) prepareQuery(ctx context.Context) error { return nil } -func (tq *TeamQuery) sqlAll(ctx context.Context) ([]*Team, error) { +func (tq *TeamQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Team, error) { var ( nodes = []*Team{} withFKs = tq.withFKs @@ -475,178 +497,214 @@ func (tq *TeamQuery) sqlAll(ctx context.Context) ([]*Team, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, team.ForeignKeys...) 
} - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Team).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Team{config: tq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(tq.modifiers) > 0 { + _spec.Modifiers = tq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, tq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := tq.withTeamToBuild; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Team) - for i := range nodes { - if nodes[i].team_team_to_build == nil { - continue - } - fk := *nodes[i].team_team_to_build - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + if err := tq.loadTeamToBuild(ctx, query, nodes, nil, + func(n *Team, e *Build) { n.Edges.TeamToBuild = e }); err != nil { + return nil, err } - query.Where(build.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := tq.withTeamToStatus; query != nil { + if err := tq.loadTeamToStatus(ctx, query, nodes, nil, + func(n *Team, e *Status) { n.Edges.TeamToStatus = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "team_team_to_build" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.TeamToBuild = n - } + } + if query := tq.withTeamToProvisionedNetwork; query != nil { + if err := tq.loadTeamToProvisionedNetwork(ctx, query, nodes, + func(n *Team) { n.Edges.TeamToProvisionedNetwork = []*ProvisionedNetwork{} }, + func(n *Team, e *ProvisionedNetwork) { + n.Edges.TeamToProvisionedNetwork = append(n.Edges.TeamToProvisionedNetwork, e) + }); err != nil { + return nil, err } } - - if query := tq.withTeamToStatus; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Team) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] + if query := tq.withTeamToPlan; query != nil { + if err := tq.loadTeamToPlan(ctx, query, nodes, nil, + func(n *Team, e *Plan) { n.Edges.TeamToPlan = e }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.Status(func(s *sql.Selector) { - s.Where(sql.InValues(team.TeamToStatusColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + for name, query := range tq.withNamedTeamToProvisionedNetwork { + if err := tq.loadTeamToProvisionedNetwork(ctx, query, nodes, + func(n *Team) { n.appendNamedTeamToProvisionedNetwork(name) }, + func(n *Team, e *ProvisionedNetwork) { n.appendNamedTeamToProvisionedNetwork(name, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.team_team_to_status - if fk == nil { - return nil, fmt.Errorf(`foreign-key "team_team_to_status" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "team_team_to_status" returned %v for node %v`, *fk, n.ID) - } - node.Edges.TeamToStatus = n + } + for i := range 
tq.loadTotal { + if err := tq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := tq.withTeamToProvisionedNetwork; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*Team) +func (tq *TeamQuery) loadTeamToBuild(ctx context.Context, query *BuildQuery, nodes []*Team, init func(*Team), assign func(*Team, *Build)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Team) + for i := range nodes { + if nodes[i].team_team_to_build == nil { + continue + } + fk := *nodes[i].team_team_to_build + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(build.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "team_team_to_build" returned %v`, n.ID) + } for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.TeamToProvisionedNetwork = []*ProvisionedNetwork{} + assign(nodes[i], n) } - query.withFKs = true - query.Where(predicate.ProvisionedNetwork(func(s *sql.Selector) { - s.Where(sql.InValues(team.TeamToProvisionedNetworkColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + return nil +} +func (tq *TeamQuery) loadTeamToStatus(ctx context.Context, query *StatusQuery, nodes []*Team, init func(*Team), assign func(*Team, *Status)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Team) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Status(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(team.TeamToStatusColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.team_team_to_status + if fk == nil { + return fmt.Errorf(`foreign-key "team_team_to_status" is nil for node %v`, n.ID) } - for _, n := range neighbors { - fk := n.provisioned_network_provisioned_network_to_team - if fk == nil { - return nil, fmt.Errorf(`foreign-key "provisioned_network_provisioned_network_to_team" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "provisioned_network_provisioned_network_to_team" returned %v for node %v`, *fk, n.ID) - } - node.Edges.TeamToProvisionedNetwork = append(node.Edges.TeamToProvisionedNetwork, n) + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "team_team_to_status" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - if query := tq.withTeamToPlan; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Team) - for i := range nodes { - if nodes[i].plan_plan_to_team == nil { - continue - } - fk := *nodes[i].plan_plan_to_team - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) + return nil +} +func (tq *TeamQuery) loadTeamToProvisionedNetwork(ctx context.Context, query *ProvisionedNetworkQuery, nodes []*Team, init func(*Team), assign func(*Team, *ProvisionedNetwork)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Team) + for i := range nodes { + fks = append(fks, nodes[i].ID) + 
nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - query.Where(plan.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + } + query.withFKs = true + query.Where(predicate.ProvisionedNetwork(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(team.TeamToProvisionedNetworkColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.provisioned_network_provisioned_network_to_team + if fk == nil { + return fmt.Errorf(`foreign-key "provisioned_network_provisioned_network_to_team" is nil for node %v`, n.ID) } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "plan_plan_to_team" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.TeamToPlan = n - } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "provisioned_network_provisioned_network_to_team" returned %v for node %v`, *fk, n.ID) } + assign(node, n) } - - return nodes, nil + return nil +} +func (tq *TeamQuery) loadTeamToPlan(ctx context.Context, query *PlanQuery, nodes []*Team, init func(*Team), assign func(*Team, *Plan)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Team) + for i := range nodes { + if nodes[i].plan_plan_to_team == nil { + continue + } + fk := *nodes[i].plan_plan_to_team + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(plan.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "plan_plan_to_team" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } func (tq *TeamQuery) sqlCount(ctx context.Context) (int, error) { _spec := tq.querySpec() - _spec.Node.Columns = tq.fields - if len(tq.fields) > 0 { - _spec.Unique = tq.unique != nil && *tq.unique + if len(tq.modifiers) > 0 { + _spec.Modifiers = tq.modifiers } - return sqlgraph.CountNodes(ctx, tq.driver, _spec) -} - -func (tq *TeamQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := tq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = tq.ctx.Fields + if len(tq.ctx.Fields) > 0 { + _spec.Unique = tq.ctx.Unique != nil && *tq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, tq.driver, _spec) } func (tq *TeamQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: team.Table, - Columns: team.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, - }, - From: tq.sql, - Unique: true, - } - if unique := tq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(team.Table, team.Columns, sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID)) + _spec.From = tq.sql + if unique := tq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if tq.path != nil { + _spec.Unique = true } - if fields := tq.fields; len(fields) > 0 { + if fields := tq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, team.FieldID) for i := range fields { @@ -662,10 +720,10 @@ func (tq *TeamQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := tq.limit; limit != nil { + 
if limit := tq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := tq.offset; offset != nil { + if offset := tq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := tq.order; len(ps) > 0 { @@ -681,7 +739,7 @@ func (tq *TeamQuery) querySpec() *sqlgraph.QuerySpec { func (tq *TeamQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(tq.driver.Dialect()) t1 := builder.Table(team.Table) - columns := tq.fields + columns := tq.ctx.Fields if len(columns) == 0 { columns = team.Columns } @@ -690,7 +748,7 @@ func (tq *TeamQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = tq.sql selector.Select(selector.Columns(columns...)...) } - if tq.unique != nil && *tq.unique { + if tq.ctx.Unique != nil && *tq.ctx.Unique { selector.Distinct() } for _, p := range tq.predicates { @@ -699,25 +757,35 @@ func (tq *TeamQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range tq.order { p(selector) } - if offset := tq.offset; offset != nil { + if offset := tq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := tq.limit; limit != nil { + if limit := tq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } +// WithNamedTeamToProvisionedNetwork tells the query-builder to eager-load the nodes that are connected to the "TeamToProvisionedNetwork" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (tq *TeamQuery) WithNamedTeamToProvisionedNetwork(name string, opts ...func(*ProvisionedNetworkQuery)) *TeamQuery { + query := (&ProvisionedNetworkClient{config: tq.config}).Query() + for _, opt := range opts { + opt(query) + } + if tq.withNamedTeamToProvisionedNetwork == nil { + tq.withNamedTeamToProvisionedNetwork = make(map[string]*ProvisionedNetworkQuery) + } + tq.withNamedTeamToProvisionedNetwork[name] = query + return tq +} + // TeamGroupBy is the group-by builder for Team entities. type TeamGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *TeamQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -726,471 +794,77 @@ func (tgb *TeamGroupBy) Aggregate(fns ...AggregateFunc) *TeamGroupBy { return tgb } -// Scan applies the group-by query and scans the result into the given value. -func (tgb *TeamGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := tgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (tgb *TeamGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, tgb.build.ctx, "GroupBy") + if err := tgb.build.prepareQuery(ctx); err != nil { return err } - tgb.sql = query - return tgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (tgb *TeamGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := tgb.Scan(ctx, v); err != nil { - panic(err) - } + return scanWithInterceptors[*TeamQuery, *TeamGroupBy](ctx, tgb.build, tgb, tgb.build.inters, v) } -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (tgb *TeamGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TeamGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (tgb *TeamGroupBy) StringsX(ctx context.Context) []string { - v, err := tgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TeamGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = tgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (tgb *TeamGroupBy) StringX(ctx context.Context) string { - v, err := tgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TeamGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TeamGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (tgb *TeamGroupBy) IntsX(ctx context.Context) []int { - v, err := tgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TeamGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = tgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (tgb *TeamGroupBy) IntX(ctx context.Context) int { - v, err := tgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TeamGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TeamGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (tgb *TeamGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := tgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (tgb *TeamGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = tgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (tgb *TeamGroupBy) Float64X(ctx context.Context) float64 { - v, err := tgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TeamGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TeamGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (tgb *TeamGroupBy) BoolsX(ctx context.Context) []bool { - v, err := tgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TeamGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = tgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (tgb *TeamGroupBy) BoolX(ctx context.Context) bool { - v, err := tgb.Bool(ctx) - if err != nil { - panic(err) +func (tgb *TeamGroupBy) sqlScan(ctx context.Context, root *TeamQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(tgb.fns)) + for _, fn := range tgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (tgb *TeamGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range tgb.fields { - if !team.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*tgb.flds)+len(tgb.fns)) + for _, f := range *tgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := tgb.sqlQuery() + selector.GroupBy(selector.Columns(*tgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := tgb.driver.Query(ctx, query, args, rows); err != nil { + if err := tgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (tgb *TeamGroupBy) sqlQuery() *sql.Selector { - selector := tgb.sql.Select() - aggregation := make([]string, 0, len(tgb.fns)) - for _, fn := range tgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(tgb.fields)+len(tgb.fns)) - for _, f := range tgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(tgb.fields...)...) -} - // TeamSelect is the builder for selecting fields of Team entities. type TeamSelect struct { *TeamQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ts *TeamSelect) Aggregate(fns ...AggregateFunc) *TeamSelect { + ts.fns = append(ts.fns, fns...) + return ts } // Scan applies the selector query and scans the result into the given value. -func (ts *TeamSelect) Scan(ctx context.Context, v interface{}) error { +func (ts *TeamSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ts.ctx, "Select") if err := ts.prepareQuery(ctx); err != nil { return err } - ts.sql = ts.TeamQuery.sqlQuery(ctx) - return ts.sqlScan(ctx, v) + return scanWithInterceptors[*TeamQuery, *TeamSelect](ctx, ts.TeamQuery, ts, ts.inters, v) } -// ScanX is like Scan, but panics if an error occurs. -func (ts *TeamSelect) ScanX(ctx context.Context, v interface{}) { - if err := ts.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) Strings(ctx context.Context) ([]string, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TeamSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ts *TeamSelect) StringsX(ctx context.Context) []string { - v, err := ts.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ts.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ts *TeamSelect) StringX(ctx context.Context) string { - v, err := ts.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) Ints(ctx context.Context) ([]int, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TeamSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ts *TeamSelect) IntsX(ctx context.Context) []int { - v, err := ts.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. 
-func (ts *TeamSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ts.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ts *TeamSelect) IntX(ctx context.Context) int { - v, err := ts.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TeamSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ts *TeamSelect) Float64sX(ctx context.Context) []float64 { - v, err := ts.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ts.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ts *TeamSelect) Float64X(ctx context.Context) float64 { - v, err := ts.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TeamSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ts *TeamSelect) BoolsX(ctx context.Context) []bool { - v, err := ts.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ts *TeamSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ts.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{team.Label} - default: - err = fmt.Errorf("ent: TeamSelect.Bools returned %d results when one was expected", len(v)) +func (ts *TeamSelect) sqlScan(ctx context.Context, root *TeamQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ts.fns)) + for _, fn := range ts.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ts *TeamSelect) BoolX(ctx context.Context) bool { - v, err := ts.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ts.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
} - return v -} - -func (ts *TeamSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ts.sql.Query() + query, args := selector.Query() if err := ts.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/team_update.go b/ent/team_update.go index 33bbf1be..d57f3edf 100755 --- a/ent/team_update.go +++ b/ent/team_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -39,6 +39,14 @@ func (tu *TeamUpdate) SetTeamNumber(i int) *TeamUpdate { return tu } +// SetNillableTeamNumber sets the "team_number" field if the given value is not nil. +func (tu *TeamUpdate) SetNillableTeamNumber(i *int) *TeamUpdate { + if i != nil { + tu.SetTeamNumber(*i) + } + return tu +} + // AddTeamNumber adds i to the "team_number" field. func (tu *TeamUpdate) AddTeamNumber(i int) *TeamUpdate { tu.mutation.AddTeamNumber(i) @@ -161,40 +169,7 @@ func (tu *TeamUpdate) ClearTeamToPlan() *TeamUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (tu *TeamUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(tu.hooks) == 0 { - if err = tu.check(); err != nil { - return 0, err - } - affected, err = tu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TeamMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tu.check(); err != nil { - return 0, err - } - tu.mutation = mutation - affected, err = tu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(tu.hooks) - 1; i >= 0; i-- { - if tu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, tu.sqlSave, tu.mutation, tu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -228,16 +203,10 @@ func (tu *TeamUpdate) check() error { } func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: team.Table, - Columns: team.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, - }, + if err := tu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(team.Table, team.Columns, sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID)) if ps := tu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -246,25 +215,13 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := tu.mutation.TeamNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: team.FieldTeamNumber, - }) + _spec.SetField(team.FieldTeamNumber, field.TypeInt, value) } if value, ok := tu.mutation.AddedTeamNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: team.FieldTeamNumber, - }) + _spec.AddField(team.FieldTeamNumber, field.TypeInt, value) } if value, ok := tu.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: team.FieldVars, - }) + _spec.SetField(team.FieldVars, field.TypeJSON, value) } if tu.mutation.TeamToBuildCleared() { edge := &sqlgraph.EdgeSpec{ @@ -274,10 +231,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -290,10 +244,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -309,10 +260,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -325,10 +273,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -344,10 +289,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -360,10 +302,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: 
[]string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -379,10 +318,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -398,10 +334,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -414,10 +347,7 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{team.TeamToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -429,10 +359,11 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{team.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + tu.mutation.done = true return n, nil } @@ -451,6 +382,14 @@ func (tuo *TeamUpdateOne) SetTeamNumber(i int) *TeamUpdateOne { return tuo } +// SetNillableTeamNumber sets the "team_number" field if the given value is not nil. +func (tuo *TeamUpdateOne) SetNillableTeamNumber(i *int) *TeamUpdateOne { + if i != nil { + tuo.SetTeamNumber(*i) + } + return tuo +} + // AddTeamNumber adds i to the "team_number" field. func (tuo *TeamUpdateOne) AddTeamNumber(i int) *TeamUpdateOne { tuo.mutation.AddTeamNumber(i) @@ -571,6 +510,12 @@ func (tuo *TeamUpdateOne) ClearTeamToPlan() *TeamUpdateOne { return tuo } +// Where appends a list predicates to the TeamUpdate builder. +func (tuo *TeamUpdateOne) Where(ps ...predicate.Team) *TeamUpdateOne { + tuo.mutation.Where(ps...) + return tuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (tuo *TeamUpdateOne) Select(field string, fields ...string) *TeamUpdateOne { @@ -580,40 +525,7 @@ func (tuo *TeamUpdateOne) Select(field string, fields ...string) *TeamUpdateOne // Save executes the query and returns the updated Team entity. 
func (tuo *TeamUpdateOne) Save(ctx context.Context) (*Team, error) { - var ( - err error - node *Team - ) - if len(tuo.hooks) == 0 { - if err = tuo.check(); err != nil { - return nil, err - } - node, err = tuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TeamMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tuo.check(); err != nil { - return nil, err - } - tuo.mutation = mutation - node, err = tuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(tuo.hooks) - 1; i >= 0; i-- { - if tuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, tuo.sqlSave, tuo.mutation, tuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -647,16 +559,10 @@ func (tuo *TeamUpdateOne) check() error { } func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: team.Table, - Columns: team.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: team.FieldID, - }, - }, + if err := tuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(team.Table, team.Columns, sqlgraph.NewFieldSpec(team.FieldID, field.TypeUUID)) id, ok := tuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Team.id" for update`)} @@ -682,25 +588,13 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) } } if value, ok := tuo.mutation.TeamNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: team.FieldTeamNumber, - }) + _spec.SetField(team.FieldTeamNumber, field.TypeInt, value) } if value, ok := tuo.mutation.AddedTeamNumber(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Value: value, - Column: team.FieldTeamNumber, - }) + _spec.AddField(team.FieldTeamNumber, field.TypeInt, value) } if value, ok := tuo.mutation.Vars(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeJSON, - Value: value, - Column: team.FieldVars, - }) + _spec.SetField(team.FieldVars, field.TypeJSON, value) } if tuo.mutation.TeamToBuildCleared() { edge := &sqlgraph.EdgeSpec{ @@ -710,10 +604,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -726,10 +617,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToBuildColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: build.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(build.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -745,10 +633,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - 
Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -761,10 +646,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToStatusColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: status.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(status.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -780,10 +662,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -796,10 +675,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -815,10 +691,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToProvisionedNetworkColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: provisionednetwork.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(provisionednetwork.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -834,10 +707,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -850,10 +720,7 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) Columns: []string{team.TeamToPlanColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: plan.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(plan.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -868,9 +735,10 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{team.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + tuo.mutation.done = true return _node, nil } diff --git a/ent/template/ent.tmpl b/ent/template/ent.tmpl index 8f72ebef..ed0b239b 100644 --- a/ent/template/ent.tmpl +++ b/ent/template/ent.tmpl @@ -11,15 +11,11 @@ in the LICENSE file in the root directory of this source tree. {{ $pkg := base $.Config.Package }} {{ template "header" $ }} -{{ template "import" $ }} - -import ( - {{- range $import := $.SiblingImports }} - {{ $import.Alias }} "{{ $import.Path }}" - {{- end }} -) +{{ with extend $ "Imports" $.SiblingImports }} + {{ template "import" . }} +{{ end }} -// {{ $.Name }} is the model entity for the {{ $.Name }} schema. 
+{{ template "model/comment" $ }} {{- with $tmpls := matchTemplate "model/comment/additional/*" }} {{- range $tmpl := $tmpls }} {{- xtemplate $tmpl $ }} @@ -27,13 +23,15 @@ import ( {{- end }} type {{ $.Name }} struct { config {{ template "model/omittags" $ }} - // ID of the ent. - {{- if $.ID.Comment }} - {{- range $line := split $.ID.Comment "\n" }} - // {{ $line }} + {{- if $.HasOneFieldID }} + // ID of the ent. + {{- if $.ID.Comment }} + {{- range $line := split $.ID.Comment "\n" }} + // {{ $line }} + {{- end }} {{- end }} + ID {{ $.ID.Type }} {{ with $.Annotations.Fields.StructTag.id }}`{{ . }}`{{ else }}`{{ $.ID.StructTag }}`{{ end }} {{- end }} - ID {{ $.ID.Type }} {{ with $.Annotations.Fields.StructTag.id }}`{{ . }}`{{ else }}`{{ $.ID.StructTag }}`{{ end }} {{- range $f := $.Fields }} {{- $tag := $f.StructTag }}{{ with $tags := $.Annotations.Fields.StructTag }}{{ with index $tags $f.Name }}{{ $tag = . }}{{ end }}{{ end }} {{- template "model/fieldcomment" $f }} @@ -44,13 +42,14 @@ type {{ $.Name }} struct { // The values are being populated by the {{ $.Name }}Query when eager-loading is set. Edges {{ $.Name }}Edges {{ template "model/edgetags" $ }} + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl {{- range $e := . }} // {{ $e.StructField }} holds the value of the {{ $e.Name }} edge. HCL{{ $e.StructField }} {{ if not $e.Unique }}[]{{ end }}*{{ $e.Type.Name }} {{ with $e.StructTag }}`{{ . }}`{{ end }} {{- end }} - // - + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ + {{- end -}} {{- /* Additional fields to add by the storage driver. */}} {{- $tmpl := printf "dialect/%s/model/fields" $.Storage }} @@ -58,19 +57,21 @@ type {{ $.Name }} struct { {{- xtemplate $tmpl . }} {{- end }} {{- /* Additional fields to add by the user. */}} - {{ template "model/fields/additional" $ }} + {{- template "model/fields/additional" $ }} } {{- with $.Edges }} // {{ $.Name }}Edges holds the relations/edges for other nodes in the graph. type {{ $.Name }}Edges struct { {{- range $e := . }} - // {{ $e.StructField }} holds the value of the {{ $e.Name }} edge. + {{- template "model/edgecomment" $e }} {{ $e.StructField }} {{ if not $e.Unique }}[]{{ end }}*{{ $e.Type.Name }} {{ with $e.StructTag }}`{{ . }}`{{ end }} {{- end }} // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [{{ len . }}]bool + {{- /* Additional fields to add by the user. */}} + {{- template "model/edges/fields/additional" $ }} } {{- range $i, $e := . }} @@ -80,8 +81,7 @@ type {{ $.Name }}Edges struct { if e.loadedTypes[{{ $i }}] { {{- if $e.Unique }} if e.{{ $e.StructField }} == nil { - // The edge {{ $e.Name }} was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: {{ $e.Type.Package }}.Label} } {{- end }} @@ -101,7 +101,7 @@ type {{ $.Name }}Edges struct { {{ $func := print "Query" $e.StructField }} // {{ $func }} queries the "{{ $e.Name }}" edge of the {{ $.Name }} entity. 
func ({{ $receiver }} *{{ $.Name }}) {{ $func }}() *{{ $e.Type.QueryName }} { - return (&{{ $.Name }}Client{config: {{ $receiver }}.config}).{{ $func }}({{ $receiver }}) + return New{{ $.ClientName }}({{ $receiver }}.config).{{ $func }}({{ $receiver }}) } {{ end }} @@ -109,17 +109,17 @@ type {{ $.Name }}Edges struct { // Note that you need to call {{ $.Name }}.Unwrap() before calling this method if this {{ $.Name }} // was returned from a transaction, and the transaction was committed or rolled back. func ({{ $receiver }} *{{ $.Name }}) Update() *{{ $.UpdateOneName }} { - return (&{{ $.Name }}Client{config: {{ $receiver }}.config}).UpdateOne({{ $receiver }}) + return New{{ $.ClientName }}({{ $receiver }}.config).UpdateOne({{ $receiver }}) } // Unwrap unwraps the {{ $.Name }} entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func ({{ $receiver }} *{{ $.Name }}) Unwrap() *{{ $.Name }} { - tx, ok := {{ $receiver }}.config.driver.(*txDriver) + _tx, ok := {{ $receiver }}.config.driver.(*txDriver) if !ok { panic("{{ $pkg }}: {{ $.Name }} is not a transactional entity") } - {{ $receiver }}.config.driver = tx.drv + {{ $receiver }}.config.driver = _tx.drv return {{ $receiver }} } @@ -135,12 +135,6 @@ type {{ $slice }} []*{{ $.Name }} {{ $tmpl := printf "dialect/%s/decode/many" $.Storage }} {{ xtemplate $tmpl . }} {{ end }} - -func ({{ $receiver }} {{ $slice }}) config(cfg config) { - for _i := range {{ $receiver }} { - {{ $receiver }}[_i].config = cfg - } -} {{ end }} {{/* A template to generate a fmt.Stringer implementation. */}} @@ -151,15 +145,20 @@ func ({{ $receiver }} {{ $slice }}) config(cfg config) { func ({{ $receiver }} *{{ $.Name }}) String() string { var builder strings.Builder builder.WriteString("{{ $.Name }}(") - builder.WriteString(fmt.Sprintf("id=%v", {{ $receiver }}.ID)) + {{- if $.HasOneFieldID }} + builder.WriteString(fmt.Sprintf("id=%v{{ if $.Fields }}, {{ end }}", {{ $receiver }}.ID)) + {{- end }} {{- range $i, $f := $.Fields }} + {{- if ne $i 0 }} + builder.WriteString(", ") + {{- end }} {{- if $f.Sensitive }} - builder.WriteString(", {{ $f.Name }}={{ print "" }}") + builder.WriteString("{{ $f.Name }}={{ print "" }}") {{- else }} {{- $sf := printf "%s.%s" $receiver $f.StructField }} {{- if $f.Nillable }} if v := {{ $sf }}; v != nil { - builder.WriteString(", {{ $f.Name }}=") + builder.WriteString("{{ $f.Name }}=") {{- if and $f.IsTime (not $f.HasGoType) }} builder.WriteString(v.Format(time.ANSIC)) {{- else if and $f.IsString (not $f.HasGoType) }} @@ -169,7 +168,7 @@ func ({{ $receiver }} {{ $slice }}) config(cfg config) { {{- end }} } {{- else }} - builder.WriteString(", {{ $f.Name }}=") + builder.WriteString("{{ $f.Name }}=") {{- if and $f.IsTime (not $f.HasGoType) }} builder.WriteString({{ $sf }}.Format(time.ANSIC)) {{- else if and $f.IsString (not $f.HasGoType) }} @@ -185,6 +184,17 @@ func ({{ $receiver }} {{ $slice }}) config(cfg config) { } {{ end }} +{{/* A template for generating the Entity's comment. */}} +{{- define "model/comment" }} + {{- if and $.Annotations.Comment $.Annotations.Comment.Text }} + {{- range $line := split $.Annotations.Comment.Text "\n" }} + // {{ $line }} + {{- end }} + {{- else }} + // {{ $.Name }} is the model entity for the {{ $.Name }} schema. + {{- end }} +{{- end }} + {{/* A template for generating the tag of the Edges struct-field. 
*/}} {{- define "model/edgetags" }} {{- $tag := `json:"edges"` }} @@ -206,15 +216,27 @@ func ({{ $receiver }} {{ $slice }}) config(cfg config) { {{/* A template for setting the field comment. */}} {{- define "model/fieldcomment" }} - // {{ $.StructField }} holds the value of the "{{ $.Name }}" field. {{- if $.Comment }} {{- range $line := split $.Comment "\n" }} // {{ $line }} {{- end }} + {{- else }} + // {{ $.StructField }} holds the value of the "{{ $.Name }}" field. {{- end }} {{- end }} -{{/* A template for adding additional methods or helpers for the generated model.*/}} +{{/* A template for setting the edge comment. */}} +{{- define "model/edgecomment" }} + {{- if $.Comment }} + {{- range $line := split $.Comment "\n" }} + // {{ $line }} + {{- end }} + {{- else }} + // {{ $.StructField }} holds the value of the {{ $.Name }} edge. + {{- end }} +{{- end }} + +{{/* A template for adding additional methods or helpers for the generated model. */}} {{ define "model/additional" }} {{- with $tmpls := matchTemplate "model/additional/*" }} {{- range $tmpl := $tmpls }} @@ -227,4 +249,19 @@ func ({{ $receiver }} {{ $slice }}) config(cfg config) { {{- xtemplate $tmpl $ }} {{- end }} {{- end }} +{{ end }} + +{{/* A template for adding additional fields for the Edges object of the generated model. */}} +{{ define "model/edges/fields/additional" }} + {{- with $tmpls := matchTemplate "model/edges/fields/additional/*" }} + {{- range $tmpl := $tmpls }} + {{- xtemplate $tmpl $ }} + {{- end }} + {{- end }} + + {{- with $tmpls := matchTemplate (printf "dialect/%s/model/edges/fields/additional/*" $.Storage) }} + {{- range $tmpl := $tmpls }} + {{- xtemplate $tmpl $ }} + {{- end }} + {{- end }} {{ end }} \ No newline at end of file diff --git a/ent/token.go b/ent/token.go index af351dfa..735ca7a5 100755 --- a/ent/token.go +++ b/ent/token.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/authuser" "github.com/gen0cide/laforge/ent/token" @@ -25,11 +26,13 @@ type Token struct { // The values are being populated by the TokenQuery when eager-loading is set. Edges TokenEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // TokenToAuthUser holds the value of the TokenToAuthUser edge. HCLTokenToAuthUser *AuthUser `json:"TokenToAuthUser,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ auth_user_auth_user_to_token *uuid.UUID + selectValues sql.SelectValues } // TokenEdges holds the relations/edges for other nodes in the graph. @@ -39,6 +42,8 @@ type TokenEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool + // totalCount holds the count of the edges above. + totalCount [1]map[string]int } // TokenToAuthUserOrErr returns the TokenToAuthUser value or an error if the edge @@ -46,8 +51,7 @@ type TokenEdges struct { func (e TokenEdges) TokenToAuthUserOrErr() (*AuthUser, error) { if e.loadedTypes[0] { if e.TokenToAuthUser == nil { - // The edge TokenToAuthUser was loaded in eager-loading, - // but was not found. + // Edge was loaded but was not found. return nil, &NotFoundError{label: authuser.Label} } return e.TokenToAuthUser, nil @@ -56,8 +60,8 @@ func (e TokenEdges) TokenToAuthUserOrErr() (*AuthUser, error) { } // scanValues returns the types for scanning values from sql.Rows. 
-func (*Token) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*Token) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { case token.FieldExpireAt: @@ -69,7 +73,7 @@ func (*Token) scanValues(columns []string) ([]interface{}, error) { case token.ForeignKeys[0]: // auth_user_auth_user_to_token values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Token", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -77,7 +81,7 @@ func (*Token) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Token fields. -func (t *Token) assignValues(columns []string, values []interface{}) error { +func (t *Token) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -108,31 +112,39 @@ func (t *Token) assignValues(columns []string, values []interface{}) error { t.auth_user_auth_user_to_token = new(uuid.UUID) *t.auth_user_auth_user_to_token = *value.S.(*uuid.UUID) } + default: + t.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Token. +// This includes values selected through modifiers, order, etc. +func (t *Token) Value(name string) (ent.Value, error) { + return t.selectValues.Get(name) +} + // QueryTokenToAuthUser queries the "TokenToAuthUser" edge of the Token entity. func (t *Token) QueryTokenToAuthUser() *AuthUserQuery { - return (&TokenClient{config: t.config}).QueryTokenToAuthUser(t) + return NewTokenClient(t.config).QueryTokenToAuthUser(t) } // Update returns a builder for updating this Token. // Note that you need to call Token.Unwrap() before calling this method if this Token // was returned from a transaction, and the transaction was committed or rolled back. func (t *Token) Update() *TokenUpdateOne { - return (&TokenClient{config: t.config}).UpdateOne(t) + return NewTokenClient(t.config).UpdateOne(t) } // Unwrap unwraps the Token entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (t *Token) Unwrap() *Token { - tx, ok := t.config.driver.(*txDriver) + _tx, ok := t.config.driver.(*txDriver) if !ok { panic("ent: Token is not a transactional entity") } - t.config.driver = tx.drv + t.config.driver = _tx.drv return t } @@ -140,10 +152,11 @@ func (t *Token) Unwrap() *Token { func (t *Token) String() string { var builder strings.Builder builder.WriteString("Token(") - builder.WriteString(fmt.Sprintf("id=%v", t.ID)) - builder.WriteString(", token=") + builder.WriteString(fmt.Sprintf("id=%v, ", t.ID)) + builder.WriteString("token=") builder.WriteString(t.Token) - builder.WriteString(", expire_at=") + builder.WriteString(", ") + builder.WriteString("expire_at=") builder.WriteString(fmt.Sprintf("%v", t.ExpireAt)) builder.WriteByte(')') return builder.String() @@ -151,9 +164,3 @@ func (t *Token) String() string { // Tokens is a parsable slice of Token. 
type Tokens []*Token - -func (t Tokens) config(cfg config) { - for _i := range t { - t[_i].config = cfg - } -} diff --git a/ent/token/token.go b/ent/token/token.go index 7ddfbea5..af2fd97f 100755 --- a/ent/token/token.go +++ b/ent/token/token.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package token import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -60,3 +62,35 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Token queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByToken orders the results by the token field. +func ByToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldToken, opts...).ToFunc() +} + +// ByExpireAt orders the results by the expire_at field. +func ByExpireAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpireAt, opts...).ToFunc() +} + +// ByTokenToAuthUserField orders the results by TokenToAuthUser field. +func ByTokenToAuthUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newTokenToAuthUserStep(), sql.OrderByField(field, opts...)) + } +} +func newTokenToAuthUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(TokenToAuthUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, TokenToAuthUserTable, TokenToAuthUserColumn), + ) +} diff --git a/ent/token/where.go b/ent/token/where.go index 1015f9a5..60ad30a6 100755 --- a/ent/token/where.go +++ b/ent/token/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package token @@ -11,286 +11,162 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Token(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Token(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Token(sql.FieldLTE(FieldID, id)) } // Token applies equality check predicate on the "token" field. It's identical to TokenEQ. func Token(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldEQ(FieldToken, v)) } // ExpireAt applies equality check predicate on the "expire_at" field. It's identical to ExpireAtEQ. func ExpireAt(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldEQ(FieldExpireAt, v)) } // TokenEQ applies the EQ predicate on the "token" field. func TokenEQ(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldEQ(FieldToken, v)) } // TokenNEQ applies the NEQ predicate on the "token" field. func TokenNEQ(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldNEQ(FieldToken, v)) } // TokenIn applies the In predicate on the "token" field. func TokenIn(vs ...string) predicate.Token { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Token(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldToken), v...)) - }) + return predicate.Token(sql.FieldIn(FieldToken, vs...)) } // TokenNotIn applies the NotIn predicate on the "token" field. func TokenNotIn(vs ...string) predicate.Token { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Token(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldToken), v...)) - }) + return predicate.Token(sql.FieldNotIn(FieldToken, vs...)) } // TokenGT applies the GT predicate on the "token" field. 
func TokenGT(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldGT(FieldToken, v)) } // TokenGTE applies the GTE predicate on the "token" field. func TokenGTE(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldGTE(FieldToken, v)) } // TokenLT applies the LT predicate on the "token" field. func TokenLT(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldLT(FieldToken, v)) } // TokenLTE applies the LTE predicate on the "token" field. func TokenLTE(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldLTE(FieldToken, v)) } // TokenContains applies the Contains predicate on the "token" field. func TokenContains(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldContains(FieldToken, v)) } // TokenHasPrefix applies the HasPrefix predicate on the "token" field. func TokenHasPrefix(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldHasPrefix(FieldToken, v)) } // TokenHasSuffix applies the HasSuffix predicate on the "token" field. func TokenHasSuffix(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldHasSuffix(FieldToken, v)) } // TokenEqualFold applies the EqualFold predicate on the "token" field. func TokenEqualFold(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldEqualFold(FieldToken, v)) } // TokenContainsFold applies the ContainsFold predicate on the "token" field. func TokenContainsFold(v string) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldToken), v)) - }) + return predicate.Token(sql.FieldContainsFold(FieldToken, v)) } // ExpireAtEQ applies the EQ predicate on the "expire_at" field. func ExpireAtEQ(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldEQ(FieldExpireAt, v)) } // ExpireAtNEQ applies the NEQ predicate on the "expire_at" field. func ExpireAtNEQ(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldNEQ(FieldExpireAt, v)) } // ExpireAtIn applies the In predicate on the "expire_at" field. func ExpireAtIn(vs ...int64) predicate.Token { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Token(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldExpireAt), v...)) - }) + return predicate.Token(sql.FieldIn(FieldExpireAt, vs...)) } // ExpireAtNotIn applies the NotIn predicate on the "expire_at" field. 
func ExpireAtNotIn(vs ...int64) predicate.Token { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Token(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldExpireAt), v...)) - }) + return predicate.Token(sql.FieldNotIn(FieldExpireAt, vs...)) } // ExpireAtGT applies the GT predicate on the "expire_at" field. func ExpireAtGT(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldGT(FieldExpireAt, v)) } // ExpireAtGTE applies the GTE predicate on the "expire_at" field. func ExpireAtGTE(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldGTE(FieldExpireAt, v)) } // ExpireAtLT applies the LT predicate on the "expire_at" field. func ExpireAtLT(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldLT(FieldExpireAt, v)) } // ExpireAtLTE applies the LTE predicate on the "expire_at" field. func ExpireAtLTE(v int64) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldExpireAt), v)) - }) + return predicate.Token(sql.FieldLTE(FieldExpireAt, v)) } // HasTokenToAuthUser applies the HasEdge predicate on the "TokenToAuthUser" edge. @@ -298,7 +174,6 @@ func HasTokenToAuthUser() predicate.Token { return predicate.Token(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(TokenToAuthUserTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, TokenToAuthUserTable, TokenToAuthUserColumn), ) sqlgraph.HasNeighbors(s, step) @@ -308,11 +183,7 @@ func HasTokenToAuthUser() predicate.Token { // HasTokenToAuthUserWith applies the HasEdge predicate on the "TokenToAuthUser" edge with a given conditions (other predicates). func HasTokenToAuthUserWith(preds ...predicate.AuthUser) predicate.Token { return predicate.Token(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(TokenToAuthUserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, TokenToAuthUserTable, TokenToAuthUserColumn), - ) + step := newTokenToAuthUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -323,32 +194,15 @@ func HasTokenToAuthUserWith(preds ...predicate.AuthUser) predicate.Token { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Token) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Token(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Token) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Token(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Token) predicate.Token { - return predicate.Token(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Token(sql.NotPredicates(p)) } diff --git a/ent/token_create.go b/ent/token_create.go index 2af17155..7d4ace5c 100755 --- a/ent/token_create.go +++ b/ent/token_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -65,44 +65,8 @@ func (tc *TokenCreate) Mutation() *TokenMutation { // Save creates the Token in the database. func (tc *TokenCreate) Save(ctx context.Context) (*Token, error) { - var ( - err error - node *Token - ) tc.defaults() - if len(tc.hooks) == 0 { - if err = tc.check(); err != nil { - return nil, err - } - node, err = tc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TokenMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tc.check(); err != nil { - return nil, err - } - tc.mutation = mutation - if node, err = tc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(tc.hooks) - 1; i >= 0; i-- { - if tc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, tc.sqlSave, tc.mutation, tc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -150,10 +114,13 @@ func (tc *TokenCreate) check() error { } func (tc *TokenCreate) sqlSave(ctx context.Context) (*Token, error) { + if err := tc.check(); err != nil { + return nil, err + } _node, _spec := tc.createSpec() if err := sqlgraph.CreateNode(ctx, tc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -164,38 +131,26 @@ func (tc *TokenCreate) sqlSave(ctx context.Context) (*Token, error) { return nil, err } } + tc.mutation.id = &_node.ID + tc.mutation.done = true return _node, nil } func (tc *TokenCreate) createSpec() (*Token, *sqlgraph.CreateSpec) { var ( _node = &Token{config: tc.config} - _spec = &sqlgraph.CreateSpec{ - Table: token.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(token.Table, sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID)) ) if id, ok := tc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := tc.mutation.Token(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: token.FieldToken, - }) + _spec.SetField(token.FieldToken, field.TypeString, value) _node.Token = value } if value, ok := tc.mutation.ExpireAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: token.FieldExpireAt, - }) + _spec.SetField(token.FieldExpireAt, field.TypeInt64, value) _node.ExpireAt = value } if nodes := tc.mutation.TokenToAuthUserIDs(); len(nodes) > 0 { @@ -206,10 +161,7 @@ func (tc *TokenCreate) createSpec() (*Token, *sqlgraph.CreateSpec) { Columns: []string{token.TokenToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -224,11 +176,15 @@ func (tc *TokenCreate) createSpec() (*Token, *sqlgraph.CreateSpec) { // TokenCreateBulk is the builder for creating many Token entities in bulk. type TokenCreateBulk struct { config + err error builders []*TokenCreate } // Save creates the Token entities in the database. func (tcb *TokenCreateBulk) Save(ctx context.Context) ([]*Token, error) { + if tcb.err != nil { + return nil, tcb.err + } specs := make([]*sqlgraph.CreateSpec, len(tcb.builders)) nodes := make([]*Token, len(tcb.builders)) mutators := make([]Mutator, len(tcb.builders)) @@ -245,8 +201,8 @@ func (tcb *TokenCreateBulk) Save(ctx context.Context) ([]*Token, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, tcb.builders[i+1].mutation) } else { @@ -254,7 +210,7 @@ func (tcb *TokenCreateBulk) Save(ctx context.Context) ([]*Token, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, tcb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/token_delete.go b/ent/token_delete.go index 7bb261a1..5502d2bd 100755 --- a/ent/token_delete.go +++ b/ent/token_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (td *TokenDelete) Where(ps ...predicate.Token) *TokenDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (td *TokenDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(td.hooks) == 0 { - affected, err = td.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TokenMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - td.mutation = mutation - affected, err = td.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(td.hooks) - 1; i >= 0; i-- { - if td.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = td.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, td.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, td.sqlExec, td.mutation, td.hooks) } // ExecX is like Exec, but panics if an error occurs. 
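The Create and Delete builders above now route hook execution through the shared withHooks helper instead of hand-rolled mutator chains, so the public builder API is unchanged. A minimal usage sketch, assuming the standard generated client methods and an already-opened *ent.Client; the SetTokenToAuthUserID setter follows ent's naming convention for the TokenToAuthUser edge and is an assumption here:

// Assumed imports: context, time, github.com/google/uuid,
// github.com/gen0cide/laforge/ent, github.com/gen0cide/laforge/ent/token.
// issueToken creates a token owned by ownerID that expires in 24 hours,
// then prunes already-expired rows; both calls still run registered hooks.
func issueToken(ctx context.Context, client *ent.Client, ownerID uuid.UUID, value string) (*ent.Token, error) {
	tok, err := client.Token.Create().
		SetToken(value).
		SetExpireAt(time.Now().Add(24 * time.Hour).Unix()).
		SetTokenToAuthUserID(ownerID). // assumed edge setter name
		Save(ctx)
	if err != nil {
		return nil, err
	}
	// Bulk-delete everything whose expire_at is already in the past.
	if _, err := client.Token.Delete().
		Where(token.ExpireAtLT(time.Now().Unix())).
		Exec(ctx); err != nil {
		return nil, err
	}
	return tok, nil
}
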
@@ -68,15 +40,7 @@ func (td *TokenDelete) ExecX(ctx context.Context) int { } func (td *TokenDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: token.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(token.Table, sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID)) if ps := td.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (td *TokenDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, td.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, td.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + td.mutation.done = true + return affected, err } // TokenDeleteOne is the builder for deleting a single Token entity. @@ -92,6 +61,12 @@ type TokenDeleteOne struct { td *TokenDelete } +// Where appends a list predicates to the TokenDelete builder. +func (tdo *TokenDeleteOne) Where(ps ...predicate.Token) *TokenDeleteOne { + tdo.td.mutation.Where(ps...) + return tdo +} + // Exec executes the deletion query. func (tdo *TokenDeleteOne) Exec(ctx context.Context) error { n, err := tdo.td.Exec(ctx) @@ -107,5 +82,7 @@ func (tdo *TokenDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (tdo *TokenDeleteOne) ExecX(ctx context.Context) { - tdo.td.ExecX(ctx) + if err := tdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/token_query.go b/ent/token_query.go index 8e433971..7cfec2cf 100755 --- a/ent/token_query.go +++ b/ent/token_query.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "errors" "fmt" "math" @@ -20,15 +19,14 @@ import ( // TokenQuery is the builder for querying Token entities. type TokenQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.Token - // eager-loading edges. + ctx *QueryContext + order []token.OrderOption + inters []Interceptor + predicates []predicate.Token withTokenToAuthUser *AuthUserQuery withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*Token) error // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -40,34 +38,34 @@ func (tq *TokenQuery) Where(ps ...predicate.Token) *TokenQuery { return tq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (tq *TokenQuery) Limit(limit int) *TokenQuery { - tq.limit = &limit + tq.ctx.Limit = &limit return tq } -// Offset adds an offset step to the query. +// Offset to start from. func (tq *TokenQuery) Offset(offset int) *TokenQuery { - tq.offset = &offset + tq.ctx.Offset = &offset return tq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (tq *TokenQuery) Unique(unique bool) *TokenQuery { - tq.unique = &unique + tq.ctx.Unique = &unique return tq } -// Order adds an order step to the query. -func (tq *TokenQuery) Order(o ...OrderFunc) *TokenQuery { +// Order specifies how the records should be ordered. +func (tq *TokenQuery) Order(o ...token.OrderOption) *TokenQuery { tq.order = append(tq.order, o...) 
return tq } // QueryTokenToAuthUser chains the current query on the "TokenToAuthUser" edge. func (tq *TokenQuery) QueryTokenToAuthUser() *AuthUserQuery { - query := &AuthUserQuery{config: tq.config} + query := (&AuthUserClient{config: tq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := tq.prepareQuery(ctx); err != nil { return nil, err @@ -90,7 +88,7 @@ func (tq *TokenQuery) QueryTokenToAuthUser() *AuthUserQuery { // First returns the first Token entity from the query. // Returns a *NotFoundError when no Token was found. func (tq *TokenQuery) First(ctx context.Context) (*Token, error) { - nodes, err := tq.Limit(1).All(ctx) + nodes, err := tq.Limit(1).All(setContextOp(ctx, tq.ctx, "First")) if err != nil { return nil, err } @@ -113,7 +111,7 @@ func (tq *TokenQuery) FirstX(ctx context.Context) *Token { // Returns a *NotFoundError when no Token ID was found. func (tq *TokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = tq.Limit(1).IDs(ctx); err != nil { + if ids, err = tq.Limit(1).IDs(setContextOp(ctx, tq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -136,7 +134,7 @@ func (tq *TokenQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one Token entity is found. // Returns a *NotFoundError when no Token entities are found. func (tq *TokenQuery) Only(ctx context.Context) (*Token, error) { - nodes, err := tq.Limit(2).All(ctx) + nodes, err := tq.Limit(2).All(setContextOp(ctx, tq.ctx, "Only")) if err != nil { return nil, err } @@ -164,7 +162,7 @@ func (tq *TokenQuery) OnlyX(ctx context.Context) *Token { // Returns a *NotFoundError when no entities are found. func (tq *TokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = tq.Limit(2).IDs(ctx); err != nil { + if ids, err = tq.Limit(2).IDs(setContextOp(ctx, tq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -189,10 +187,12 @@ func (tq *TokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Tokens. func (tq *TokenQuery) All(ctx context.Context) ([]*Token, error) { + ctx = setContextOp(ctx, tq.ctx, "All") if err := tq.prepareQuery(ctx); err != nil { return nil, err } - return tq.sqlAll(ctx) + qr := querierAll[[]*Token, *TokenQuery]() + return withInterceptors[[]*Token](ctx, tq, qr, tq.inters) } // AllX is like All, but panics if an error occurs. @@ -205,9 +205,12 @@ func (tq *TokenQuery) AllX(ctx context.Context) []*Token { } // IDs executes the query and returns a list of Token IDs. -func (tq *TokenQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := tq.Select(token.FieldID).Scan(ctx, &ids); err != nil { +func (tq *TokenQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if tq.ctx.Unique == nil && tq.path != nil { + tq.Unique(true) + } + ctx = setContextOp(ctx, tq.ctx, "IDs") + if err = tq.Select(token.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -224,10 +227,11 @@ func (tq *TokenQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (tq *TokenQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, tq.ctx, "Count") if err := tq.prepareQuery(ctx); err != nil { return 0, err } - return tq.sqlCount(ctx) + return withInterceptors[int](ctx, tq, querierCount[*TokenQuery](), tq.inters) } // CountX is like Count, but panics if an error occurs. 
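The query builder now keeps its limit, offset, uniqueness, and selected fields in a QueryContext and orders results through token.OrderOption values rather than generic OrderFunc closures. A minimal sketch of the resulting call pattern, assuming the generated client plus the sql.OrderDesc helper from entgo.io/ent/dialect/sql:

// Assumed imports: context, time, entgo.io/ent/dialect/sql,
// github.com/gen0cide/laforge/ent, github.com/gen0cide/laforge/ent/token.
// activeTokens returns up to limit unexpired tokens, latest expiry first,
// with the owning AuthUser eager-loaded via the TokenToAuthUser edge.
func activeTokens(ctx context.Context, client *ent.Client, limit int) ([]*ent.Token, error) {
	return client.Token.Query().
		Where(token.ExpireAtGT(time.Now().Unix())).
		Order(token.ByExpireAt(sql.OrderDesc())).
		Limit(limit).
		WithTokenToAuthUser().
		All(ctx)
}
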
@@ -241,10 +245,15 @@ func (tq *TokenQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (tq *TokenQuery) Exist(ctx context.Context) (bool, error) { - if err := tq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, tq.ctx, "Exist") + switch _, err := tq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return tq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -264,22 +273,21 @@ func (tq *TokenQuery) Clone() *TokenQuery { } return &TokenQuery{ config: tq.config, - limit: tq.limit, - offset: tq.offset, - order: append([]OrderFunc{}, tq.order...), + ctx: tq.ctx.Clone(), + order: append([]token.OrderOption{}, tq.order...), + inters: append([]Interceptor{}, tq.inters...), predicates: append([]predicate.Token{}, tq.predicates...), withTokenToAuthUser: tq.withTokenToAuthUser.Clone(), // clone intermediate query. - sql: tq.sql.Clone(), - path: tq.path, - unique: tq.unique, + sql: tq.sql.Clone(), + path: tq.path, } } // WithTokenToAuthUser tells the query-builder to eager-load the nodes that are connected to // the "TokenToAuthUser" edge. The optional arguments are used to configure the query builder of the edge. func (tq *TokenQuery) WithTokenToAuthUser(opts ...func(*AuthUserQuery)) *TokenQuery { - query := &AuthUserQuery{config: tq.config} + query := (&AuthUserClient{config: tq.config}).Query() for _, opt := range opts { opt(query) } @@ -301,17 +309,13 @@ func (tq *TokenQuery) WithTokenToAuthUser(opts ...func(*AuthUserQuery)) *TokenQu // GroupBy(token.FieldToken). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (tq *TokenQuery) GroupBy(field string, fields ...string) *TokenGroupBy { - group := &TokenGroupBy{config: tq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := tq.prepareQuery(ctx); err != nil { - return nil, err - } - return tq.sqlQuery(ctx), nil - } - return group + tq.ctx.Fields = append([]string{field}, fields...) + grbuild := &TokenGroupBy{build: tq} + grbuild.flds = &tq.ctx.Fields + grbuild.label = token.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -326,14 +330,31 @@ func (tq *TokenQuery) GroupBy(field string, fields ...string) *TokenGroupBy { // client.Token.Query(). // Select(token.FieldToken). // Scan(ctx, &v) -// func (tq *TokenQuery) Select(fields ...string) *TokenSelect { - tq.fields = append(tq.fields, fields...) - return &TokenSelect{TokenQuery: tq} + tq.ctx.Fields = append(tq.ctx.Fields, fields...) + sbuild := &TokenSelect{TokenQuery: tq} + sbuild.label = token.Label + sbuild.flds, sbuild.scan = &tq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a TokenSelect configured with the given aggregations. +func (tq *TokenQuery) Aggregate(fns ...AggregateFunc) *TokenSelect { + return tq.Select().Aggregate(fns...) 
} func (tq *TokenQuery) prepareQuery(ctx context.Context) error { - for _, f := range tq.fields { + for _, inter := range tq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, tq); err != nil { + return err + } + } + } + for _, f := range tq.ctx.Fields { if !token.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -348,7 +369,7 @@ func (tq *TokenQuery) prepareQuery(ctx context.Context) error { return nil } -func (tq *TokenQuery) sqlAll(ctx context.Context) ([]*Token, error) { +func (tq *TokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Token, error) { var ( nodes = []*Token{} withFKs = tq.withFKs @@ -363,92 +384,95 @@ func (tq *TokenQuery) sqlAll(ctx context.Context) ([]*Token, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, token.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Token).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &Token{config: tq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(tq.modifiers) > 0 { + _spec.Modifiers = tq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, tq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := tq.withTokenToAuthUser; query != nil { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Token) - for i := range nodes { - if nodes[i].auth_user_auth_user_to_token == nil { - continue - } - fk := *nodes[i].auth_user_auth_user_to_token - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - query.Where(authuser.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { + if err := tq.loadTokenToAuthUser(ctx, query, nodes, nil, + func(n *Token, e *AuthUser) { n.Edges.TokenToAuthUser = e }); err != nil { return nil, err } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "auth_user_auth_user_to_token" returned %v`, n.ID) - } - for i := range nodes { - nodes[i].Edges.TokenToAuthUser = n - } + } + for i := range tq.loadTotal { + if err := tq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } - return nodes, nil } -func (tq *TokenQuery) sqlCount(ctx context.Context) (int, error) { - _spec := tq.querySpec() - _spec.Node.Columns = tq.fields - if len(tq.fields) > 0 { - _spec.Unique = tq.unique != nil && *tq.unique +func (tq *TokenQuery) loadTokenToAuthUser(ctx context.Context, query *AuthUserQuery, nodes []*Token, init func(*Token), assign func(*Token, *AuthUser)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Token) + for i := range nodes { + if nodes[i].auth_user_auth_user_to_token == nil { + continue + } + fk := *nodes[i].auth_user_auth_user_to_token + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) } - 
return sqlgraph.CountNodes(ctx, tq.driver, _spec) + if len(ids) == 0 { + return nil + } + query.Where(authuser.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "auth_user_auth_user_to_token" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil } -func (tq *TokenQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := tq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) +func (tq *TokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := tq.querySpec() + if len(tq.modifiers) > 0 { + _spec.Modifiers = tq.modifiers } - return n > 0, nil + _spec.Node.Columns = tq.ctx.Fields + if len(tq.ctx.Fields) > 0 { + _spec.Unique = tq.ctx.Unique != nil && *tq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, tq.driver, _spec) } func (tq *TokenQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: token.Table, - Columns: token.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, - }, - From: tq.sql, - Unique: true, - } - if unique := tq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(token.Table, token.Columns, sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID)) + _spec.From = tq.sql + if unique := tq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if tq.path != nil { + _spec.Unique = true } - if fields := tq.fields; len(fields) > 0 { + if fields := tq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, token.FieldID) for i := range fields { @@ -464,10 +488,10 @@ func (tq *TokenQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := tq.limit; limit != nil { + if limit := tq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := tq.offset; offset != nil { + if offset := tq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := tq.order; len(ps) > 0 { @@ -483,7 +507,7 @@ func (tq *TokenQuery) querySpec() *sqlgraph.QuerySpec { func (tq *TokenQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(tq.driver.Dialect()) t1 := builder.Table(token.Table) - columns := tq.fields + columns := tq.ctx.Fields if len(columns) == 0 { columns = token.Columns } @@ -492,7 +516,7 @@ func (tq *TokenQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = tq.sql selector.Select(selector.Columns(columns...)...) } - if tq.unique != nil && *tq.unique { + if tq.ctx.Unique != nil && *tq.ctx.Unique { selector.Distinct() } for _, p := range tq.predicates { @@ -501,12 +525,12 @@ func (tq *TokenQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range tq.order { p(selector) } - if offset := tq.offset; offset != nil { + if offset := tq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := tq.limit; limit != nil { + if limit := tq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -514,12 +538,8 @@ func (tq *TokenQuery) sqlQuery(ctx context.Context) *sql.Selector { // TokenGroupBy is the group-by builder for Token entities. type TokenGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). 
- sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + selector + build *TokenQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -528,471 +548,77 @@ func (tgb *TokenGroupBy) Aggregate(fns ...AggregateFunc) *TokenGroupBy { return tgb } -// Scan applies the group-by query and scans the result into the given value. -func (tgb *TokenGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := tgb.path(ctx) - if err != nil { +// Scan applies the selector query and scans the result into the given value. +func (tgb *TokenGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, tgb.build.ctx, "GroupBy") + if err := tgb.build.prepareQuery(ctx); err != nil { return err } - tgb.sql = query - return tgb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (tgb *TokenGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := tgb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TokenGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (tgb *TokenGroupBy) StringsX(ctx context.Context) []string { - v, err := tgb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = tgb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (tgb *TokenGroupBy) StringX(ctx context.Context) string { - v, err := tgb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TokenGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (tgb *TokenGroupBy) IntsX(ctx context.Context) []int { - v, err := tgb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. 
-func (tgb *TokenGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = tgb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenGroupBy.Ints returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*TokenQuery, *TokenGroupBy](ctx, tgb.build, tgb, tgb.build.inters, v) } -// IntX is like Int, but panics if an error occurs. -func (tgb *TokenGroupBy) IntX(ctx context.Context) int { - v, err := tgb.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TokenGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (tgb *TokenGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := tgb.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = tgb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenGroupBy.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (tgb *TokenGroupBy) Float64X(ctx context.Context) float64 { - v, err := tgb.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(tgb.fields) > 1 { - return nil, errors.New("ent: TokenGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := tgb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (tgb *TokenGroupBy) BoolsX(ctx context.Context) []bool { - v, err := tgb.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (tgb *TokenGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = tgb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenGroupBy.Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. 
-func (tgb *TokenGroupBy) BoolX(ctx context.Context) bool { - v, err := tgb.Bool(ctx) - if err != nil { - panic(err) +func (tgb *TokenGroupBy) sqlScan(ctx context.Context, root *TokenQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(tgb.fns)) + for _, fn := range tgb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (tgb *TokenGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range tgb.fields { - if !token.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*tgb.flds)+len(tgb.fns)) + for _, f := range *tgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := tgb.sqlQuery() + selector.GroupBy(selector.Columns(*tgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := tgb.driver.Query(ctx, query, args, rows); err != nil { + if err := tgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (tgb *TokenGroupBy) sqlQuery() *sql.Selector { - selector := tgb.sql.Select() - aggregation := make([]string, 0, len(tgb.fns)) - for _, fn := range tgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(tgb.fields)+len(tgb.fns)) - for _, f := range tgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(tgb.fields...)...) -} - // TokenSelect is the builder for selecting fields of Token entities. type TokenSelect struct { *TokenQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ts *TokenSelect) Aggregate(fns ...AggregateFunc) *TokenSelect { + ts.fns = append(ts.fns, fns...) + return ts } // Scan applies the selector query and scans the result into the given value. -func (ts *TokenSelect) Scan(ctx context.Context, v interface{}) error { +func (ts *TokenSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ts.ctx, "Select") if err := ts.prepareQuery(ctx); err != nil { return err } - ts.sql = ts.TokenQuery.sqlQuery(ctx) - return ts.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ts *TokenSelect) ScanX(ctx context.Context, v interface{}) { - if err := ts.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) Strings(ctx context.Context) ([]string, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TokenSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. 
-func (ts *TokenSelect) StringsX(ctx context.Context) []string { - v, err := ts.Strings(ctx) - if err != nil { - panic(err) - } - return v + return scanWithInterceptors[*TokenQuery, *TokenSelect](ctx, ts.TokenQuery, ts, ts.inters, v) } -// String returns a single string from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ts.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenSelect.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ts *TokenSelect) StringX(ctx context.Context) string { - v, err := ts.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) Ints(ctx context.Context) ([]int, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TokenSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ts *TokenSelect) IntsX(ctx context.Context) []int { - v, err := ts.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ts.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ts *TokenSelect) IntX(ctx context.Context) int { - v, err := ts.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TokenSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ts *TokenSelect) Float64sX(ctx context.Context) []float64 { - v, err := ts.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ts.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (ts *TokenSelect) Float64X(ctx context.Context) float64 { - v, err := ts.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. 
-func (ts *TokenSelect) Bools(ctx context.Context) ([]bool, error) { - if len(ts.fields) > 1 { - return nil, errors.New("ent: TokenSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := ts.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (ts *TokenSelect) BoolsX(ctx context.Context) []bool { - v, err := ts.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (ts *TokenSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ts.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{token.Label} - default: - err = fmt.Errorf("ent: TokenSelect.Bools returned %d results when one was expected", len(v)) +func (ts *TokenSelect) sqlScan(ctx context.Context, root *TokenQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ts.fns)) + for _, fn := range ts.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (ts *TokenSelect) BoolX(ctx context.Context) bool { - v, err := ts.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*ts.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (ts *TokenSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := ts.sql.Query() + query, args := selector.Query() if err := ts.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/token_update.go b/ent/token_update.go index 1e283e53..464431cd 100755 --- a/ent/token_update.go +++ b/ent/token_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -35,6 +35,14 @@ func (tu *TokenUpdate) SetToken(s string) *TokenUpdate { return tu } +// SetNillableToken sets the "token" field if the given value is not nil. +func (tu *TokenUpdate) SetNillableToken(s *string) *TokenUpdate { + if s != nil { + tu.SetToken(*s) + } + return tu +} + // SetExpireAt sets the "expire_at" field. func (tu *TokenUpdate) SetExpireAt(i int64) *TokenUpdate { tu.mutation.ResetExpireAt() @@ -42,6 +50,14 @@ func (tu *TokenUpdate) SetExpireAt(i int64) *TokenUpdate { return tu } +// SetNillableExpireAt sets the "expire_at" field if the given value is not nil. +func (tu *TokenUpdate) SetNillableExpireAt(i *int64) *TokenUpdate { + if i != nil { + tu.SetExpireAt(*i) + } + return tu +} + // AddExpireAt adds i to the "expire_at" field. func (tu *TokenUpdate) AddExpireAt(i int64) *TokenUpdate { tu.mutation.AddExpireAt(i) @@ -72,40 +88,7 @@ func (tu *TokenUpdate) ClearTokenToAuthUser() *TokenUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (tu *TokenUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(tu.hooks) == 0 { - if err = tu.check(); err != nil { - return 0, err - } - affected, err = tu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TokenMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tu.check(); err != nil { - return 0, err - } - tu.mutation = mutation - affected, err = tu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(tu.hooks) - 1; i >= 0; i-- { - if tu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, tu.sqlSave, tu.mutation, tu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -139,16 +122,10 @@ func (tu *TokenUpdate) check() error { } func (tu *TokenUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: token.Table, - Columns: token.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, - }, + if err := tu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(token.Table, token.Columns, sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID)) if ps := tu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -157,25 +134,13 @@ func (tu *TokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := tu.mutation.Token(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: token.FieldToken, - }) + _spec.SetField(token.FieldToken, field.TypeString, value) } if value, ok := tu.mutation.ExpireAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: token.FieldExpireAt, - }) + _spec.SetField(token.FieldExpireAt, field.TypeInt64, value) } if value, ok := tu.mutation.AddedExpireAt(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: token.FieldExpireAt, - }) + _spec.AddField(token.FieldExpireAt, field.TypeInt64, value) } if tu.mutation.TokenToAuthUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -185,10 +150,7 @@ func (tu *TokenUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{token.TokenToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -201,10 +163,7 @@ func (tu *TokenUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{token.TokenToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -216,10 +175,11 @@ func (tu *TokenUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{token.Label} } else if sqlgraph.IsConstraintError(err) { - err = 
&ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + tu.mutation.done = true return n, nil } @@ -237,6 +197,14 @@ func (tuo *TokenUpdateOne) SetToken(s string) *TokenUpdateOne { return tuo } +// SetNillableToken sets the "token" field if the given value is not nil. +func (tuo *TokenUpdateOne) SetNillableToken(s *string) *TokenUpdateOne { + if s != nil { + tuo.SetToken(*s) + } + return tuo +} + // SetExpireAt sets the "expire_at" field. func (tuo *TokenUpdateOne) SetExpireAt(i int64) *TokenUpdateOne { tuo.mutation.ResetExpireAt() @@ -244,6 +212,14 @@ func (tuo *TokenUpdateOne) SetExpireAt(i int64) *TokenUpdateOne { return tuo } +// SetNillableExpireAt sets the "expire_at" field if the given value is not nil. +func (tuo *TokenUpdateOne) SetNillableExpireAt(i *int64) *TokenUpdateOne { + if i != nil { + tuo.SetExpireAt(*i) + } + return tuo +} + // AddExpireAt adds i to the "expire_at" field. func (tuo *TokenUpdateOne) AddExpireAt(i int64) *TokenUpdateOne { tuo.mutation.AddExpireAt(i) @@ -272,6 +248,12 @@ func (tuo *TokenUpdateOne) ClearTokenToAuthUser() *TokenUpdateOne { return tuo } +// Where appends a list predicates to the TokenUpdate builder. +func (tuo *TokenUpdateOne) Where(ps ...predicate.Token) *TokenUpdateOne { + tuo.mutation.Where(ps...) + return tuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (tuo *TokenUpdateOne) Select(field string, fields ...string) *TokenUpdateOne { @@ -281,40 +263,7 @@ func (tuo *TokenUpdateOne) Select(field string, fields ...string) *TokenUpdateOn // Save executes the query and returns the updated Token entity. func (tuo *TokenUpdateOne) Save(ctx context.Context) (*Token, error) { - var ( - err error - node *Token - ) - if len(tuo.hooks) == 0 { - if err = tuo.check(); err != nil { - return nil, err - } - node, err = tuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*TokenMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = tuo.check(); err != nil { - return nil, err - } - tuo.mutation = mutation - node, err = tuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(tuo.hooks) - 1; i >= 0; i-- { - if tuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = tuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, tuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, tuo.sqlSave, tuo.mutation, tuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
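The new SetNillable setters, together with Where on the update builders, make partial updates from optional input a one-liner. A minimal sketch, assuming the generated client API:

// Assumed imports: context, github.com/gen0cide/laforge/ent,
// github.com/gen0cide/laforge/ent/token.
// extendToken bumps expire_at only when newExpiry is non-nil; a nil pointer
// leaves the column untouched. It returns the number of rows updated.
func extendToken(ctx context.Context, client *ent.Client, value string, newExpiry *int64) (int, error) {
	return client.Token.Update().
		Where(token.TokenEQ(value)).
		SetNillableExpireAt(newExpiry).
		Save(ctx)
}
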
@@ -348,16 +297,10 @@ func (tuo *TokenUpdateOne) check() error { } func (tuo *TokenUpdateOne) sqlSave(ctx context.Context) (_node *Token, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: token.Table, - Columns: token.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: token.FieldID, - }, - }, + if err := tuo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(token.Table, token.Columns, sqlgraph.NewFieldSpec(token.FieldID, field.TypeUUID)) id, ok := tuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Token.id" for update`)} @@ -383,25 +326,13 @@ func (tuo *TokenUpdateOne) sqlSave(ctx context.Context) (_node *Token, err error } } if value, ok := tuo.mutation.Token(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: token.FieldToken, - }) + _spec.SetField(token.FieldToken, field.TypeString, value) } if value, ok := tuo.mutation.ExpireAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: token.FieldExpireAt, - }) + _spec.SetField(token.FieldExpireAt, field.TypeInt64, value) } if value, ok := tuo.mutation.AddedExpireAt(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: token.FieldExpireAt, - }) + _spec.AddField(token.FieldExpireAt, field.TypeInt64, value) } if tuo.mutation.TokenToAuthUserCleared() { edge := &sqlgraph.EdgeSpec{ @@ -411,10 +342,7 @@ func (tuo *TokenUpdateOne) sqlSave(ctx context.Context) (_node *Token, err error Columns: []string{token.TokenToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -427,10 +355,7 @@ func (tuo *TokenUpdateOne) sqlSave(ctx context.Context) (_node *Token, err error Columns: []string{token.TokenToAuthUserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authuser.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authuser.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -445,9 +370,10 @@ func (tuo *TokenUpdateOne) sqlSave(ctx context.Context) (_node *Token, err error if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{token.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + tuo.mutation.done = true return _node, nil } diff --git a/ent/tx.go b/ent/tx.go index 1b5041eb..ba743ead 100755 --- a/ent/tx.go +++ b/ent/tx.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -90,12 +90,6 @@ type Tx struct { // lazily loaded. client *Client clientOnce sync.Once - - // completion callbacks. - mu sync.Mutex - onCommit []CommitHook - onRollback []RollbackHook - // ctx lives for the life of the transaction. It is // the same context used by the underlying connection. ctx context.Context @@ -140,9 +134,9 @@ func (tx *Tx) Commit() error { var fn Committer = CommitFunc(func(context.Context, *Tx) error { return txDriver.tx.Commit() }) - tx.mu.Lock() - hooks := append([]CommitHook(nil), tx.onCommit...) 
- tx.mu.Unlock() + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() for i := len(hooks) - 1; i >= 0; i-- { fn = hooks[i](fn) } @@ -151,9 +145,10 @@ func (tx *Tx) Commit() error { // OnCommit adds a hook to call on commit. func (tx *Tx) OnCommit(f CommitHook) { - tx.mu.Lock() - defer tx.mu.Unlock() - tx.onCommit = append(tx.onCommit, f) + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() } type ( @@ -195,9 +190,9 @@ func (tx *Tx) Rollback() error { var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { return txDriver.tx.Rollback() }) - tx.mu.Lock() - hooks := append([]RollbackHook(nil), tx.onRollback...) - tx.mu.Unlock() + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() for i := len(hooks) - 1; i >= 0; i-- { fn = hooks[i](fn) } @@ -206,9 +201,10 @@ func (tx *Tx) Rollback() error { // OnRollback adds a hook to call on rollback. func (tx *Tx) OnRollback(f RollbackHook) { - tx.mu.Lock() - defer tx.mu.Unlock() - tx.onRollback = append(tx.onRollback, f) + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() } // Client returns a Client that binds to current transaction. @@ -276,6 +272,10 @@ type txDriver struct { drv dialect.Driver // tx is the underlying transaction. tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook } // newTx creates a new transactional driver. @@ -306,12 +306,12 @@ func (*txDriver) Commit() error { return nil } func (*txDriver) Rollback() error { return nil } // Exec calls tx.Exec. -func (tx *txDriver) Exec(ctx context.Context, query string, args, v interface{}) error { +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { return tx.tx.Exec(ctx, query, args, v) } // Query calls tx.Query. -func (tx *txDriver) Query(ctx context.Context, query string, args, v interface{}) error { +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { return tx.tx.Query(ctx, query, args, v) } diff --git a/ent/user.go b/ent/user.go index ff230bec..0d92e26f 100755 --- a/ent/user.go +++ b/ent/user.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/gen0cide/laforge/ent/user" "github.com/google/uuid" @@ -22,23 +23,25 @@ type User struct { UUID string `json:"uuid,omitempty" hcl:"uuid,optional"` // Email holds the value of the "email" field. Email string `json:"email,omitempty" hcl:"email,attr"` - // HclID holds the value of the "hcl_id" field. - HclID string `json:"hcl_id,omitempty" hcl:"id,label"` + // HCLID holds the value of the "hcl_id" field. + HCLID string `json:"hcl_id,omitempty" hcl:"id,label"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the UserQuery when eager-loading is set. Edges UserEdges `json:"edges"` + // vvvvvvvvvvvv CUSTOM vvvvvvvvvvvv // Edges put into the main struct to be loaded via hcl // UserToTag holds the value of the UserToTag edge. HCLUserToTag []*Tag `json:"UserToTag,omitempty"` // UserToEnvironment holds the value of the UserToEnvironment edge. 
HCLUserToEnvironment []*Environment `json:"UserToEnvironment,omitempty"` - // + // ^^^^^^^^^^^^ CUSTOM ^^^^^^^^^^^^^ ansible_ansible_to_user *uuid.UUID command_command_to_user *uuid.UUID finding_finding_to_user *uuid.UUID host_host_to_user *uuid.UUID script_script_to_user *uuid.UUID + selectValues sql.SelectValues } // UserEdges holds the relations/edges for other nodes in the graph. @@ -50,6 +53,11 @@ type UserEdges struct { // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [2]bool + // totalCount holds the count of the edges above. + totalCount [2]map[string]int + + namedUserToTag map[string][]*Tag + namedUserToEnvironment map[string][]*Environment } // UserToTagOrErr returns the UserToTag value or an error if the edge @@ -71,11 +79,11 @@ func (e UserEdges) UserToEnvironmentOrErr() ([]*Environment, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*User) scanValues(columns []string) ([]interface{}, error) { - values := make([]interface{}, len(columns)) +func (*User) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case user.FieldName, user.FieldUUID, user.FieldEmail, user.FieldHclID: + case user.FieldName, user.FieldUUID, user.FieldEmail, user.FieldHCLID: values[i] = new(sql.NullString) case user.FieldID: values[i] = new(uuid.UUID) @@ -90,7 +98,7 @@ func (*User) scanValues(columns []string) ([]interface{}, error) { case user.ForeignKeys[4]: // script_script_to_user values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type User", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -98,7 +106,7 @@ func (*User) scanValues(columns []string) ([]interface{}, error) { // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the User fields. -func (u *User) assignValues(columns []string, values []interface{}) error { +func (u *User) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } @@ -128,11 +136,11 @@ func (u *User) assignValues(columns []string, values []interface{}) error { } else if value.Valid { u.Email = value.String } - case user.FieldHclID: + case user.FieldHCLID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field hcl_id", values[i]) } else if value.Valid { - u.HclID = value.String + u.HCLID = value.String } case user.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { @@ -169,36 +177,44 @@ func (u *User) assignValues(columns []string, values []interface{}) error { u.script_script_to_user = new(uuid.UUID) *u.script_script_to_user = *value.S.(*uuid.UUID) } + default: + u.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. +func (u *User) Value(name string) (ent.Value, error) { + return u.selectValues.Get(name) +} + // QueryUserToTag queries the "UserToTag" edge of the User entity. func (u *User) QueryUserToTag() *TagQuery { - return (&UserClient{config: u.config}).QueryUserToTag(u) + return NewUserClient(u.config).QueryUserToTag(u) } // QueryUserToEnvironment queries the "UserToEnvironment" edge of the User entity. 
func (u *User) QueryUserToEnvironment() *EnvironmentQuery { - return (&UserClient{config: u.config}).QueryUserToEnvironment(u) + return NewUserClient(u.config).QueryUserToEnvironment(u) } // Update returns a builder for updating this User. // Note that you need to call User.Unwrap() before calling this method if this User // was returned from a transaction, and the transaction was committed or rolled back. func (u *User) Update() *UserUpdateOne { - return (&UserClient{config: u.config}).UpdateOne(u) + return NewUserClient(u.config).UpdateOne(u) } // Unwrap unwraps the User entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. func (u *User) Unwrap() *User { - tx, ok := u.config.driver.(*txDriver) + _tx, ok := u.config.driver.(*txDriver) if !ok { panic("ent: User is not a transactional entity") } - u.config.driver = tx.drv + u.config.driver = _tx.drv return u } @@ -206,24 +222,69 @@ func (u *User) Unwrap() *User { func (u *User) String() string { var builder strings.Builder builder.WriteString("User(") - builder.WriteString(fmt.Sprintf("id=%v", u.ID)) - builder.WriteString(", name=") + builder.WriteString(fmt.Sprintf("id=%v, ", u.ID)) + builder.WriteString("name=") builder.WriteString(u.Name) - builder.WriteString(", uuid=") + builder.WriteString(", ") + builder.WriteString("uuid=") builder.WriteString(u.UUID) - builder.WriteString(", email=") + builder.WriteString(", ") + builder.WriteString("email=") builder.WriteString(u.Email) - builder.WriteString(", hcl_id=") - builder.WriteString(u.HclID) + builder.WriteString(", ") + builder.WriteString("hcl_id=") + builder.WriteString(u.HCLID) builder.WriteByte(')') return builder.String() } -// Users is a parsable slice of User. -type Users []*User +// NamedUserToTag returns the UserToTag named value or an error if the edge was not +// loaded in eager-loading with this name. +func (u *User) NamedUserToTag(name string) ([]*Tag, error) { + if u.Edges.namedUserToTag == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := u.Edges.namedUserToTag[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} -func (u Users) config(cfg config) { - for _i := range u { - u[_i].config = cfg +func (u *User) appendNamedUserToTag(name string, edges ...*Tag) { + if u.Edges.namedUserToTag == nil { + u.Edges.namedUserToTag = make(map[string][]*Tag) + } + if len(edges) == 0 { + u.Edges.namedUserToTag[name] = []*Tag{} + } else { + u.Edges.namedUserToTag[name] = append(u.Edges.namedUserToTag[name], edges...) } } + +// NamedUserToEnvironment returns the UserToEnvironment named value or an error if the edge was not +// loaded in eager-loading with this name. +func (u *User) NamedUserToEnvironment(name string) ([]*Environment, error) { + if u.Edges.namedUserToEnvironment == nil { + return nil, &NotLoadedError{edge: name} + } + nodes, ok := u.Edges.namedUserToEnvironment[name] + if !ok { + return nil, &NotLoadedError{edge: name} + } + return nodes, nil +} + +func (u *User) appendNamedUserToEnvironment(name string, edges ...*Environment) { + if u.Edges.namedUserToEnvironment == nil { + u.Edges.namedUserToEnvironment = make(map[string][]*Environment) + } + if len(edges) == 0 { + u.Edges.namedUserToEnvironment[name] = []*Environment{} + } else { + u.Edges.namedUserToEnvironment[name] = append(u.Edges.namedUserToEnvironment[name], edges...) + } +} + +// Users is a parsable slice of User. 
+type Users []*User diff --git a/ent/user/user.go b/ent/user/user.go index d08cd952..fd456ea5 100755 --- a/ent/user/user.go +++ b/ent/user/user.go @@ -1,8 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package user import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -17,8 +19,8 @@ const ( FieldUUID = "uuid" // FieldEmail holds the string denoting the email field in the database. FieldEmail = "email" - // FieldHclID holds the string denoting the hcl_id field in the database. - FieldHclID = "hcl_id" + // FieldHCLID holds the string denoting the hcl_id field in the database. + FieldHCLID = "hcl_id" // EdgeUserToTag holds the string denoting the usertotag edge name in mutations. EdgeUserToTag = "UserToTag" // EdgeUserToEnvironment holds the string denoting the usertoenvironment edge name in mutations. @@ -45,7 +47,7 @@ var Columns = []string{ FieldName, FieldUUID, FieldEmail, - FieldHclID, + FieldHCLID, } // ForeignKeys holds the SQL foreign-keys that are owned by the "users" @@ -83,3 +85,73 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByUUID orders the results by the uuid field. +func ByUUID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUUID, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByHCLID orders the results by the hcl_id field. +func ByHCLID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHCLID, opts...).ToFunc() +} + +// ByUserToTagCount orders the results by UserToTag count. +func ByUserToTagCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUserToTagStep(), opts...) + } +} + +// ByUserToTag orders the results by UserToTag terms. +func ByUserToTag(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserToTagStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUserToEnvironmentCount orders the results by UserToEnvironment count. +func ByUserToEnvironmentCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUserToEnvironmentStep(), opts...) + } +} + +// ByUserToEnvironment orders the results by UserToEnvironment terms. +func ByUserToEnvironment(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserToEnvironmentStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newUserToTagStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserToTagInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UserToTagTable, UserToTagColumn), + ) +} +func newUserToEnvironmentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserToEnvironmentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, UserToEnvironmentTable, UserToEnvironmentPrimaryKey...), + ) +} diff --git a/ent/user/where.go b/ent/user/where.go index cd3e3071..184327c2 100755 --- a/ent/user/where.go +++ b/ent/user/where.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package user @@ -11,557 +11,327 @@ import ( // ID filters vertices based on their ID field. func ID(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.User(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(ids) == 0 { - s.Where(sql.False()) - return - } - v := make([]interface{}, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.User(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id uuid.UUID) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.User(sql.FieldLTE(FieldID, id)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
func Name(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldEQ(FieldName, v)) } // UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ. func UUID(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldEQ(FieldUUID, v)) } // Email applies equality check predicate on the "email" field. It's identical to EmailEQ. func Email(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldEQ(FieldEmail, v)) } -// HclID applies equality check predicate on the "hcl_id" field. It's identical to HclIDEQ. -func HclID(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLID applies equality check predicate on the "hcl_id" field. It's identical to HCLIDEQ. +func HCLID(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldHCLID, v)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.User(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.User(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. 
func NameLTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.User(sql.FieldContainsFold(FieldName, v)) } // UUIDEQ applies the EQ predicate on the "uuid" field. func UUIDEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldEQ(FieldUUID, v)) } // UUIDNEQ applies the NEQ predicate on the "uuid" field. func UUIDNEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldNEQ(FieldUUID, v)) } // UUIDIn applies the In predicate on the "uuid" field. func UUIDIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldUUID), v...)) - }) + return predicate.User(sql.FieldIn(FieldUUID, vs...)) } // UUIDNotIn applies the NotIn predicate on the "uuid" field. func UUIDNotIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldUUID), v...)) - }) + return predicate.User(sql.FieldNotIn(FieldUUID, vs...)) } // UUIDGT applies the GT predicate on the "uuid" field. func UUIDGT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldGT(FieldUUID, v)) } // UUIDGTE applies the GTE predicate on the "uuid" field. 
func UUIDGTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldGTE(FieldUUID, v)) } // UUIDLT applies the LT predicate on the "uuid" field. func UUIDLT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldLT(FieldUUID, v)) } // UUIDLTE applies the LTE predicate on the "uuid" field. func UUIDLTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldLTE(FieldUUID, v)) } // UUIDContains applies the Contains predicate on the "uuid" field. func UUIDContains(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldContains(FieldUUID, v)) } // UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field. func UUIDHasPrefix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldHasPrefix(FieldUUID, v)) } // UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field. func UUIDHasSuffix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldHasSuffix(FieldUUID, v)) } // UUIDEqualFold applies the EqualFold predicate on the "uuid" field. func UUIDEqualFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldEqualFold(FieldUUID, v)) } // UUIDContainsFold applies the ContainsFold predicate on the "uuid" field. func UUIDContainsFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldUUID), v)) - }) + return predicate.User(sql.FieldContainsFold(FieldUUID, v)) } // EmailEQ applies the EQ predicate on the "email" field. func EmailEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldEQ(FieldEmail, v)) } // EmailNEQ applies the NEQ predicate on the "email" field. func EmailNEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldNEQ(FieldEmail, v)) } // EmailIn applies the In predicate on the "email" field. func EmailIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldEmail), v...)) - }) + return predicate.User(sql.FieldIn(FieldEmail, vs...)) } // EmailNotIn applies the NotIn predicate on the "email" field. func EmailNotIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. 
- if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldEmail), v...)) - }) + return predicate.User(sql.FieldNotIn(FieldEmail, vs...)) } // EmailGT applies the GT predicate on the "email" field. func EmailGT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldGT(FieldEmail, v)) } // EmailGTE applies the GTE predicate on the "email" field. func EmailGTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldGTE(FieldEmail, v)) } // EmailLT applies the LT predicate on the "email" field. func EmailLT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldLT(FieldEmail, v)) } // EmailLTE applies the LTE predicate on the "email" field. func EmailLTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldLTE(FieldEmail, v)) } // EmailContains applies the Contains predicate on the "email" field. func EmailContains(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldContains(FieldEmail, v)) } // EmailHasPrefix applies the HasPrefix predicate on the "email" field. func EmailHasPrefix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldHasPrefix(FieldEmail, v)) } // EmailHasSuffix applies the HasSuffix predicate on the "email" field. func EmailHasSuffix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldHasSuffix(FieldEmail, v)) } // EmailEqualFold applies the EqualFold predicate on the "email" field. func EmailEqualFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldEqualFold(FieldEmail, v)) } // EmailContainsFold applies the ContainsFold predicate on the "email" field. func EmailContainsFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldEmail), v)) - }) + return predicate.User(sql.FieldContainsFold(FieldEmail, v)) } -// HclIDEQ applies the EQ predicate on the "hcl_id" field. -func HclIDEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldHclID), v)) - }) +// HCLIDEQ applies the EQ predicate on the "hcl_id" field. +func HCLIDEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldHCLID, v)) } -// HclIDNEQ applies the NEQ predicate on the "hcl_id" field. -func HclIDNEQ(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldHclID), v)) - }) +// HCLIDNEQ applies the NEQ predicate on the "hcl_id" field. +func HCLIDNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldHCLID, v)) } -// HclIDIn applies the In predicate on the "hcl_id" field. 
-func HclIDIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.In(s.C(FieldHclID), v...)) - }) +// HCLIDIn applies the In predicate on the "hcl_id" field. +func HCLIDIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldHCLID, vs...)) } -// HclIDNotIn applies the NotIn predicate on the "hcl_id" field. -func HclIDNotIn(vs ...string) predicate.User { - v := make([]interface{}, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.User(func(s *sql.Selector) { - // if not arguments were provided, append the FALSE constants, - // since we can't apply "IN ()". This will make this predicate falsy. - if len(v) == 0 { - s.Where(sql.False()) - return - } - s.Where(sql.NotIn(s.C(FieldHclID), v...)) - }) +// HCLIDNotIn applies the NotIn predicate on the "hcl_id" field. +func HCLIDNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldHCLID, vs...)) } -// HclIDGT applies the GT predicate on the "hcl_id" field. -func HclIDGT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldHclID), v)) - }) +// HCLIDGT applies the GT predicate on the "hcl_id" field. +func HCLIDGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldHCLID, v)) } -// HclIDGTE applies the GTE predicate on the "hcl_id" field. -func HclIDGTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldHclID), v)) - }) +// HCLIDGTE applies the GTE predicate on the "hcl_id" field. +func HCLIDGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldHCLID, v)) } -// HclIDLT applies the LT predicate on the "hcl_id" field. -func HclIDLT(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldHclID), v)) - }) +// HCLIDLT applies the LT predicate on the "hcl_id" field. +func HCLIDLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldHCLID, v)) } -// HclIDLTE applies the LTE predicate on the "hcl_id" field. -func HclIDLTE(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldHclID), v)) - }) +// HCLIDLTE applies the LTE predicate on the "hcl_id" field. +func HCLIDLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldHCLID, v)) } -// HclIDContains applies the Contains predicate on the "hcl_id" field. -func HclIDContains(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldHclID), v)) - }) +// HCLIDContains applies the Contains predicate on the "hcl_id" field. +func HCLIDContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldHCLID, v)) } -// HclIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. -func HclIDHasPrefix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldHclID), v)) - }) +// HCLIDHasPrefix applies the HasPrefix predicate on the "hcl_id" field. +func HCLIDHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldHCLID, v)) } -// HclIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. 
-func HclIDHasSuffix(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldHclID), v)) - }) +// HCLIDHasSuffix applies the HasSuffix predicate on the "hcl_id" field. +func HCLIDHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldHCLID, v)) } -// HclIDEqualFold applies the EqualFold predicate on the "hcl_id" field. -func HclIDEqualFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldHclID), v)) - }) +// HCLIDEqualFold applies the EqualFold predicate on the "hcl_id" field. +func HCLIDEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldHCLID, v)) } -// HclIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. -func HclIDContainsFold(v string) predicate.User { - return predicate.User(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldHclID), v)) - }) +// HCLIDContainsFold applies the ContainsFold predicate on the "hcl_id" field. +func HCLIDContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldHCLID, v)) } // HasUserToTag applies the HasEdge predicate on the "UserToTag" edge. @@ -569,7 +339,6 @@ func HasUserToTag() predicate.User { return predicate.User(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(UserToTagTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, UserToTagTable, UserToTagColumn), ) sqlgraph.HasNeighbors(s, step) @@ -579,11 +348,7 @@ func HasUserToTag() predicate.User { // HasUserToTagWith applies the HasEdge predicate on the "UserToTag" edge with a given conditions (other predicates). func HasUserToTagWith(preds ...predicate.Tag) predicate.User { return predicate.User(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(UserToTagInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, UserToTagTable, UserToTagColumn), - ) + step := newUserToTagStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -597,7 +362,6 @@ func HasUserToEnvironment() predicate.User { return predicate.User(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(UserToEnvironmentTable, FieldID), sqlgraph.Edge(sqlgraph.M2M, true, UserToEnvironmentTable, UserToEnvironmentPrimaryKey...), ) sqlgraph.HasNeighbors(s, step) @@ -607,11 +371,7 @@ func HasUserToEnvironment() predicate.User { // HasUserToEnvironmentWith applies the HasEdge predicate on the "UserToEnvironment" edge with a given conditions (other predicates). func HasUserToEnvironmentWith(preds ...predicate.Environment) predicate.User { return predicate.User(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(UserToEnvironmentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, UserToEnvironmentTable, UserToEnvironmentPrimaryKey...), - ) + step := newUserToEnvironmentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -622,32 +382,15 @@ func HasUserToEnvironmentWith(preds ...predicate.Environment) predicate.User { // And groups predicates with the AND operator between them. 
func And(predicates ...predicate.User) predicate.User { - return predicate.User(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.User(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.User) predicate.User { - return predicate.User(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.User(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.User) predicate.User { - return predicate.User(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.User(sql.NotPredicates(p)) } diff --git a/ent/user_create.go b/ent/user_create.go index e1d2b099..9481d306 100755 --- a/ent/user_create.go +++ b/ent/user_create.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -40,9 +40,9 @@ func (uc *UserCreate) SetEmail(s string) *UserCreate { return uc } -// SetHclID sets the "hcl_id" field. -func (uc *UserCreate) SetHclID(s string) *UserCreate { - uc.mutation.SetHclID(s) +// SetHCLID sets the "hcl_id" field. +func (uc *UserCreate) SetHCLID(s string) *UserCreate { + uc.mutation.SetHCLID(s) return uc } @@ -97,44 +97,8 @@ func (uc *UserCreate) Mutation() *UserMutation { // Save creates the User in the database. func (uc *UserCreate) Save(ctx context.Context) (*User, error) { - var ( - err error - node *User - ) uc.defaults() - if len(uc.hooks) == 0 { - if err = uc.check(); err != nil { - return nil, err - } - node, err = uc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*UserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = uc.check(); err != nil { - return nil, err - } - uc.mutation = mutation - if node, err = uc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(uc.hooks) - 1; i >= 0; i-- { - if uc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = uc.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, uc.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks) } // SaveX calls Save and panics if Save returns an error. 
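// Example (a minimal sketch, not generated code): with the predicates collapsed onto the
// sql.Field* helpers and the HclID identifier renamed to HCLID, a call site that filters
// users would now read like the snippet below. The client value, ctx, and the
// "example-user" label are assumptions made for this illustration only.
// imports assumed: context, github.com/gen0cide/laforge/ent, github.com/gen0cide/laforge/ent/user
func findUserByHCLID(ctx context.Context, client *ent.Client) (*ent.User, error) {
	// The old HclIDEQ helper is gone; the renamed HCLIDEQ wraps
	// sql.FieldEQ(FieldHCLID, v) as shown in the where.go diff above.
	return client.User.Query().
		Where(user.HCLIDEQ("example-user")).
		Only(ctx)
}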
@@ -178,17 +142,20 @@ func (uc *UserCreate) check() error { if _, ok := uc.mutation.Email(); !ok { return &ValidationError{Name: "email", err: errors.New(`ent: missing required field "User.email"`)} } - if _, ok := uc.mutation.HclID(); !ok { + if _, ok := uc.mutation.HCLID(); !ok { return &ValidationError{Name: "hcl_id", err: errors.New(`ent: missing required field "User.hcl_id"`)} } return nil } func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := uc.check(); err != nil { + return nil, err + } _node, _spec := uc.createSpec() if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } @@ -199,55 +166,35 @@ func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { return nil, err } } + uc.mutation.id = &_node.ID + uc.mutation.done = true return _node, nil } func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { var ( _node = &User{config: uc.config} - _spec = &sqlgraph.CreateSpec{ - Table: user.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) ) if id, ok := uc.mutation.ID(); ok { _node.ID = id _spec.ID.Value = &id } if value, ok := uc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldName, - }) + _spec.SetField(user.FieldName, field.TypeString, value) _node.Name = value } if value, ok := uc.mutation.UUID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldUUID, - }) + _spec.SetField(user.FieldUUID, field.TypeString, value) _node.UUID = value } if value, ok := uc.mutation.Email(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldEmail, - }) + _spec.SetField(user.FieldEmail, field.TypeString, value) _node.Email = value } - if value, ok := uc.mutation.HclID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldHclID, - }) - _node.HclID = value + if value, ok := uc.mutation.HCLID(); ok { + _spec.SetField(user.FieldHCLID, field.TypeString, value) + _node.HCLID = value } if nodes := uc.mutation.UserToTagIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ @@ -257,10 +204,7 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { Columns: []string{user.UserToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -276,10 +220,7 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -293,11 +234,15 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { // UserCreateBulk is the builder for creating many User entities in bulk. type UserCreateBulk struct { config + err error builders []*UserCreate } // Save creates the User entities in the database. 
func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if ucb.err != nil { + return nil, ucb.err + } specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) nodes := make([]*User, len(ucb.builders)) mutators := make([]Mutator, len(ucb.builders)) @@ -314,8 +259,8 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) } else { @@ -323,7 +268,7 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { // Invoke the actual operation on the latest mutation in the chain. if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil { if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } } } diff --git a/ent/user_delete.go b/ent/user_delete.go index 111efa0e..a61b4bb5 100755 --- a/ent/user_delete.go +++ b/ent/user_delete.go @@ -1,10 +1,9 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (ud *UserDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ud.hooks) == 0 { - affected, err = ud.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*UserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ud.mutation = mutation - affected, err = ud.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ud.hooks) - 1; i >= 0; i-- { - if ud.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ud.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ud.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (ud *UserDelete) ExecX(ctx context.Context) int { } func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: user.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) if ps := ud.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -84,7 +48,12 @@ func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { } } } - return sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ud.mutation.done = true + return affected, err } // UserDeleteOne is the builder for deleting a single User entity. @@ -92,6 +61,12 @@ type UserDeleteOne struct { ud *UserDelete } +// Where appends a list predicates to the UserDelete builder. +func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + udo.ud.mutation.Where(ps...) 
+ return udo +} + // Exec executes the deletion query. func (udo *UserDeleteOne) Exec(ctx context.Context) error { n, err := udo.ud.Exec(ctx) @@ -107,5 +82,7 @@ func (udo *UserDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (udo *UserDeleteOne) ExecX(ctx context.Context) { - udo.ud.ExecX(ctx) + if err := udo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/user_query.go b/ent/user_query.go index 49ea4832..303fc348 100755 --- a/ent/user_query.go +++ b/ent/user_query.go @@ -1,11 +1,10 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent import ( "context" "database/sql/driver" - "errors" "fmt" "math" @@ -22,16 +21,17 @@ import ( // UserQuery is the builder for querying User entities. type UserQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string - predicates []predicate.User - // eager-loading edges. - withUserToTag *TagQuery - withUserToEnvironment *EnvironmentQuery - withFKs bool + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + withUserToTag *TagQuery + withUserToEnvironment *EnvironmentQuery + withFKs bool + modifiers []func(*sql.Selector) + loadTotal []func(context.Context, []*User) error + withNamedUserToTag map[string]*TagQuery + withNamedUserToEnvironment map[string]*EnvironmentQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -43,34 +43,34 @@ func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery { return uq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (uq *UserQuery) Limit(limit int) *UserQuery { - uq.limit = &limit + uq.ctx.Limit = &limit return uq } -// Offset adds an offset step to the query. +// Offset to start from. func (uq *UserQuery) Offset(offset int) *UserQuery { - uq.offset = &offset + uq.ctx.Offset = &offset return uq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (uq *UserQuery) Unique(unique bool) *UserQuery { - uq.unique = &unique + uq.ctx.Unique = &unique return uq } -// Order adds an order step to the query. -func (uq *UserQuery) Order(o ...OrderFunc) *UserQuery { +// Order specifies how the records should be ordered. +func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { uq.order = append(uq.order, o...) return uq } // QueryUserToTag chains the current query on the "UserToTag" edge. func (uq *UserQuery) QueryUserToTag() *TagQuery { - query := &TagQuery{config: uq.config} + query := (&TagClient{config: uq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := uq.prepareQuery(ctx); err != nil { return nil, err @@ -92,7 +92,7 @@ func (uq *UserQuery) QueryUserToTag() *TagQuery { // QueryUserToEnvironment chains the current query on the "UserToEnvironment" edge. func (uq *UserQuery) QueryUserToEnvironment() *EnvironmentQuery { - query := &EnvironmentQuery{config: uq.config} + query := (&EnvironmentClient{config: uq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := uq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +115,7 @@ func (uq *UserQuery) QueryUserToEnvironment() *EnvironmentQuery { // First returns the first User entity from the query. // Returns a *NotFoundError when no User was found. 
func (uq *UserQuery) First(ctx context.Context) (*User, error) { - nodes, err := uq.Limit(1).All(ctx) + nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, "First")) if err != nil { return nil, err } @@ -138,7 +138,7 @@ func (uq *UserQuery) FirstX(ctx context.Context) *User { // Returns a *NotFoundError when no User ID was found. func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = uq.Limit(1).IDs(ctx); err != nil { + if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -161,7 +161,7 @@ func (uq *UserQuery) FirstIDX(ctx context.Context) uuid.UUID { // Returns a *NotSingularError when more than one User entity is found. // Returns a *NotFoundError when no User entities are found. func (uq *UserQuery) Only(ctx context.Context) (*User, error) { - nodes, err := uq.Limit(2).All(ctx) + nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, "Only")) if err != nil { return nil, err } @@ -189,7 +189,7 @@ func (uq *UserQuery) OnlyX(ctx context.Context) *User { // Returns a *NotFoundError when no entities are found. func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { var ids []uuid.UUID - if ids, err = uq.Limit(2).IDs(ctx); err != nil { + if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -214,10 +214,12 @@ func (uq *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID { // All executes the query and returns a list of Users. func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, uq.ctx, "All") if err := uq.prepareQuery(ctx); err != nil { return nil, err } - return uq.sqlAll(ctx) + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, uq, qr, uq.inters) } // AllX is like All, but panics if an error occurs. @@ -230,9 +232,12 @@ func (uq *UserQuery) AllX(ctx context.Context) []*User { } // IDs executes the query and returns a list of User IDs. -func (uq *UserQuery) IDs(ctx context.Context) ([]uuid.UUID, error) { - var ids []uuid.UUID - if err := uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { +func (uq *UserQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if uq.ctx.Unique == nil && uq.path != nil { + uq.Unique(true) + } + ctx = setContextOp(ctx, uq.ctx, "IDs") + if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -249,10 +254,11 @@ func (uq *UserQuery) IDsX(ctx context.Context) []uuid.UUID { // Count returns the count of the given query. func (uq *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, uq.ctx, "Count") if err := uq.prepareQuery(ctx); err != nil { return 0, err } - return uq.sqlCount(ctx) + return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters) } // CountX is like Count, but panics if an error occurs. @@ -266,10 +272,15 @@ func (uq *UserQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { - if err := uq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, uq.ctx, "Exist") + switch _, err := uq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return uq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
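// Example (a minimal sketch, not generated code): the regenerated query builder keeps its
// state in a QueryContext and its Order method now takes typed user.OrderOption values, so
// ordering by a column or by an edge count can be expressed directly. client and ctx are
// assumptions for this sketch; sql refers to entgo.io/ent/dialect/sql.
// imports assumed: context, entgo.io/ent/dialect/sql, github.com/gen0cide/laforge/ent, github.com/gen0cide/laforge/ent/user
func listUsersByTagCount(ctx context.Context, client *ent.Client) ([]*ent.User, error) {
	// ByUserToTagCount orders by the number of UserToTag edges via
	// sqlgraph.OrderByNeighborsCount, per the ent/user/user.go diff above.
	return client.User.Query().
		Order(user.ByUserToTagCount(sql.OrderDesc()), user.ByName()).
		All(ctx)
}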
@@ -289,23 +300,22 @@ func (uq *UserQuery) Clone() *UserQuery { } return &UserQuery{ config: uq.config, - limit: uq.limit, - offset: uq.offset, - order: append([]OrderFunc{}, uq.order...), + ctx: uq.ctx.Clone(), + order: append([]user.OrderOption{}, uq.order...), + inters: append([]Interceptor{}, uq.inters...), predicates: append([]predicate.User{}, uq.predicates...), withUserToTag: uq.withUserToTag.Clone(), withUserToEnvironment: uq.withUserToEnvironment.Clone(), // clone intermediate query. - sql: uq.sql.Clone(), - path: uq.path, - unique: uq.unique, + sql: uq.sql.Clone(), + path: uq.path, } } // WithUserToTag tells the query-builder to eager-load the nodes that are connected to // the "UserToTag" edge. The optional arguments are used to configure the query builder of the edge. func (uq *UserQuery) WithUserToTag(opts ...func(*TagQuery)) *UserQuery { - query := &TagQuery{config: uq.config} + query := (&TagClient{config: uq.config}).Query() for _, opt := range opts { opt(query) } @@ -316,7 +326,7 @@ func (uq *UserQuery) WithUserToTag(opts ...func(*TagQuery)) *UserQuery { // WithUserToEnvironment tells the query-builder to eager-load the nodes that are connected to // the "UserToEnvironment" edge. The optional arguments are used to configure the query builder of the edge. func (uq *UserQuery) WithUserToEnvironment(opts ...func(*EnvironmentQuery)) *UserQuery { - query := &EnvironmentQuery{config: uq.config} + query := (&EnvironmentClient{config: uq.config}).Query() for _, opt := range opts { opt(query) } @@ -338,17 +348,13 @@ func (uq *UserQuery) WithUserToEnvironment(opts ...func(*EnvironmentQuery)) *Use // GroupBy(user.FieldName). // Aggregate(ent.Count()). // Scan(ctx, &v) -// func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { - group := &UserGroupBy{config: uq.config} - group.fields = append([]string{field}, fields...) - group.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := uq.prepareQuery(ctx); err != nil { - return nil, err - } - return uq.sqlQuery(ctx), nil - } - return group + uq.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserGroupBy{build: uq} + grbuild.flds = &uq.ctx.Fields + grbuild.label = user.Label + grbuild.scan = grbuild.Scan + return grbuild } // Select allows the selection one or more fields/columns for the given query, @@ -363,14 +369,31 @@ func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { // client.User.Query(). // Select(user.FieldName). // Scan(ctx, &v) -// func (uq *UserQuery) Select(fields ...string) *UserSelect { - uq.fields = append(uq.fields, fields...) - return &UserSelect{UserQuery: uq} + uq.ctx.Fields = append(uq.ctx.Fields, fields...) + sbuild := &UserSelect{UserQuery: uq} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return uq.Select().Aggregate(fns...) 
} func (uq *UserQuery) prepareQuery(ctx context.Context) error { - for _, f := range uq.fields { + for _, inter := range uq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, uq); err != nil { + return err + } + } + } + for _, f := range uq.ctx.Fields { if !user.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -385,7 +408,7 @@ func (uq *UserQuery) prepareQuery(ctx context.Context) error { return nil } -func (uq *UserQuery) sqlAll(ctx context.Context) ([]*User, error) { +func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { var ( nodes = []*User{} withFKs = uq.withFKs @@ -398,157 +421,177 @@ func (uq *UserQuery) sqlAll(ctx context.Context) ([]*User, error) { if withFKs { _spec.Node.Columns = append(_spec.Node.Columns, user.ForeignKeys...) } - _spec.ScanValues = func(columns []string) ([]interface{}, error) { + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { node := &User{config: uq.config} nodes = append(nodes, node) - return node.scanValues(columns) - } - _spec.Assign = func(columns []string, values []interface{}) error { - if len(nodes) == 0 { - return fmt.Errorf("ent: Assign called without calling ScanValues") - } - node := nodes[len(nodes)-1] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(uq.modifiers) > 0 { + _spec.Modifiers = uq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil { return nil, err } if len(nodes) == 0 { return nodes, nil } - if query := uq.withUserToTag; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[uuid.UUID]*User) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - nodes[i].Edges.UserToTag = []*Tag{} + if err := uq.loadUserToTag(ctx, query, nodes, + func(n *User) { n.Edges.UserToTag = []*Tag{} }, + func(n *User, e *Tag) { n.Edges.UserToTag = append(n.Edges.UserToTag, e) }); err != nil { + return nil, err } - query.withFKs = true - query.Where(predicate.Tag(func(s *sql.Selector) { - s.Where(sql.InValues(user.UserToTagColumn, fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { + } + if query := uq.withUserToEnvironment; query != nil { + if err := uq.loadUserToEnvironment(ctx, query, nodes, + func(n *User) { n.Edges.UserToEnvironment = []*Environment{} }, + func(n *User, e *Environment) { n.Edges.UserToEnvironment = append(n.Edges.UserToEnvironment, e) }); err != nil { return nil, err } - for _, n := range neighbors { - fk := n.user_user_to_tag - if fk == nil { - return nil, fmt.Errorf(`foreign-key "user_user_to_tag" is nil for node %v`, n.ID) - } - node, ok := nodeids[*fk] - if !ok { - return nil, fmt.Errorf(`unexpected foreign-key "user_user_to_tag" returned %v for node %v`, *fk, n.ID) - } - node.Edges.UserToTag = append(node.Edges.UserToTag, n) + } + for name, query := range uq.withNamedUserToTag { + if err := uq.loadUserToTag(ctx, query, nodes, + func(n *User) { n.appendNamedUserToTag(name) }, + func(n *User, e *Tag) { n.appendNamedUserToTag(name, e) }); err != nil { + return nil, err + } + } + for name, query := range uq.withNamedUserToEnvironment { + if err := uq.loadUserToEnvironment(ctx, query, nodes, + 
func(n *User) { n.appendNamedUserToEnvironment(name) }, + func(n *User, e *Environment) { n.appendNamedUserToEnvironment(name, e) }); err != nil { + return nil, err + } + } + for i := range uq.loadTotal { + if err := uq.loadTotal[i](ctx, nodes); err != nil { + return nil, err } } + return nodes, nil +} - if query := uq.withUserToEnvironment; query != nil { - fks := make([]driver.Value, 0, len(nodes)) - ids := make(map[uuid.UUID]*User, len(nodes)) - for _, node := range nodes { - ids[node.ID] = node - fks = append(fks, node.ID) - node.Edges.UserToEnvironment = []*Environment{} +func (uq *UserQuery) loadUserToTag(ctx context.Context, query *TagQuery, nodes []*User, init func(*User), assign func(*User, *Tag)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) } - var ( - edgeids []uuid.UUID - edges = make(map[uuid.UUID][]*User) - ) - _spec := &sqlgraph.EdgeQuerySpec{ - Edge: &sqlgraph.EdgeSpec{ - Inverse: true, - Table: user.UserToEnvironmentTable, - Columns: user.UserToEnvironmentPrimaryKey, - }, - Predicate: func(s *sql.Selector) { - s.Where(sql.InValues(user.UserToEnvironmentPrimaryKey[1], fks...)) - }, - ScanValues: func() [2]interface{} { - return [2]interface{}{new(uuid.UUID), new(uuid.UUID)} - }, - Assign: func(out, in interface{}) error { - eout, ok := out.(*uuid.UUID) - if !ok || eout == nil { - return fmt.Errorf("unexpected id value for edge-out") - } - ein, ok := in.(*uuid.UUID) - if !ok || ein == nil { - return fmt.Errorf("unexpected id value for edge-in") - } - outValue := *eout - inValue := *ein - node, ok := ids[outValue] - if !ok { - return fmt.Errorf("unexpected node id in edges: %v", outValue) - } - if _, ok := edges[inValue]; !ok { - edgeids = append(edgeids, inValue) - } - edges[inValue] = append(edges[inValue], node) - return nil - }, + } + query.withFKs = true + query.Where(predicate.Tag(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.UserToTagColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.user_user_to_tag + if fk == nil { + return fmt.Errorf(`foreign-key "user_user_to_tag" is nil for node %v`, n.ID) } - if err := sqlgraph.QueryEdges(ctx, uq.driver, _spec); err != nil { - return nil, fmt.Errorf(`query edges "UserToEnvironment": %w`, err) + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_user_to_tag" returned %v for node %v`, *fk, n.ID) } - query.Where(environment.IDIn(edgeids...)) - neighbors, err := query.All(ctx) - if err != nil { - return nil, err + assign(node, n) + } + return nil +} +func (uq *UserQuery) loadUserToEnvironment(ctx context.Context, query *EnvironmentQuery, nodes []*User, init func(*User), assign func(*User, *Environment)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*User) + nids := make(map[uuid.UUID]map[*User]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) } - for _, n := range neighbors { - nodes, ok := edges[n.ID] - if !ok { - return nil, fmt.Errorf(`unexpected "UserToEnvironment" node returned %v`, n.ID) + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(user.UserToEnvironmentTable) + s.Join(joinT).On(s.C(environment.FieldID), joinT.C(user.UserToEnvironmentPrimaryKey[0])) + 
s.Where(sql.InValues(joinT.C(user.UserToEnvironmentPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(user.UserToEnvironmentPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil } - for i := range nodes { - nodes[i].Edges.UserToEnvironment = append(nodes[i].Edges.UserToEnvironment, n) + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*User]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil } + }) + }) + neighbors, err := withInterceptors[[]*Environment](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "UserToEnvironment" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) } } - - return nodes, nil + return nil } func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { _spec := uq.querySpec() - _spec.Node.Columns = uq.fields - if len(uq.fields) > 0 { - _spec.Unique = uq.unique != nil && *uq.unique + if len(uq.modifiers) > 0 { + _spec.Modifiers = uq.modifiers } - return sqlgraph.CountNodes(ctx, uq.driver, _spec) -} - -func (uq *UserQuery) sqlExist(ctx context.Context) (bool, error) { - n, err := uq.sqlCount(ctx) - if err != nil { - return false, fmt.Errorf("ent: check existence: %w", err) + _spec.Node.Columns = uq.ctx.Fields + if len(uq.ctx.Fields) > 0 { + _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique } - return n > 0, nil + return sqlgraph.CountNodes(ctx, uq.driver, _spec) } func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: user.Table, - Columns: user.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, - }, - From: uq.sql, - Unique: true, - } - if unique := uq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) + _spec.From = uq.sql + if unique := uq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if uq.path != nil { + _spec.Unique = true } - if fields := uq.fields; len(fields) > 0 { + if fields := uq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) for i := range fields { @@ -564,10 +607,10 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := uq.limit; limit != nil { + if limit := uq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := uq.offset; offset != nil { + if offset := uq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := uq.order; len(ps) > 0 { @@ -583,7 +626,7 @@ func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(uq.driver.Dialect()) t1 := builder.Table(user.Table) - columns := 
uq.fields + columns := uq.ctx.Fields if len(columns) == 0 { columns = user.Columns } @@ -592,7 +635,7 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = uq.sql selector.Select(selector.Columns(columns...)...) } - if uq.unique != nil && *uq.unique { + if uq.ctx.Unique != nil && *uq.ctx.Unique { selector.Distinct() } for _, p := range uq.predicates { @@ -601,498 +644,128 @@ func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range uq.order { p(selector) } - if offset := uq.offset; offset != nil { + if offset := uq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := uq.limit; limit != nil { + if limit := uq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector } -// UserGroupBy is the group-by builder for User entities. -type UserGroupBy struct { - config - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { - ugb.fns = append(ugb.fns, fns...) - return ugb -} - -// Scan applies the group-by query and scans the result into the given value. -func (ugb *UserGroupBy) Scan(ctx context.Context, v interface{}) error { - query, err := ugb.path(ctx) - if err != nil { - return err - } - ugb.sql = query - return ugb.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (ugb *UserGroupBy) ScanX(ctx context.Context, v interface{}) { - if err := ugb.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from group-by. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) Strings(ctx context.Context) ([]string, error) { - if len(ugb.fields) > 1 { - return nil, errors.New("ent: UserGroupBy.Strings is not achievable when grouping more than 1 field") - } - var v []string - if err := ugb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (ugb *UserGroupBy) StringsX(ctx context.Context) []string { - v, err := ugb.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = ugb.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserGroupBy.Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. -func (ugb *UserGroupBy) StringX(ctx context.Context) string { - v, err := ugb.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from group-by. -// It is only allowed when executing a group-by query with one field. 
-func (ugb *UserGroupBy) Ints(ctx context.Context) ([]int, error) { - if len(ugb.fields) > 1 { - return nil, errors.New("ent: UserGroupBy.Ints is not achievable when grouping more than 1 field") - } - var v []int - if err := ugb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (ugb *UserGroupBy) IntsX(ctx context.Context) []int { - v, err := ugb.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = ugb.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserGroupBy.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (ugb *UserGroupBy) IntX(ctx context.Context) int { - v, err := ugb.Int(ctx) - if err != nil { - panic(err) +// WithNamedUserToTag tells the query-builder to eager-load the nodes that are connected to the "UserToTag" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithNamedUserToTag(name string, opts ...func(*TagQuery)) *UserQuery { + query := (&TagClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) } - return v -} - -// Float64s returns list of float64s from group-by. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) Float64s(ctx context.Context) ([]float64, error) { - if len(ugb.fields) > 1 { - return nil, errors.New("ent: UserGroupBy.Float64s is not achievable when grouping more than 1 field") - } - var v []float64 - if err := ugb.Scan(ctx, &v); err != nil { - return nil, err + if uq.withNamedUserToTag == nil { + uq.withNamedUserToTag = make(map[string]*TagQuery) } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (ugb *UserGroupBy) Float64sX(ctx context.Context) []float64 { - v, err := ugb.Float64s(ctx) - if err != nil { - panic(err) - } - return v + uq.withNamedUserToTag[name] = query + return uq } -// Float64 returns a single float64 from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = ugb.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserGroupBy.Float64s returned %d results when one was expected", len(v)) +// WithNamedUserToEnvironment tells the query-builder to eager-load the nodes that are connected to the "UserToEnvironment" +// edge with the given name. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithNamedUserToEnvironment(name string, opts ...func(*EnvironmentQuery)) *UserQuery { + query := (&EnvironmentClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) } - return -} - -// Float64X is like Float64, but panics if an error occurs. 
-func (ugb *UserGroupBy) Float64X(ctx context.Context) float64 { - v, err := ugb.Float64(ctx) - if err != nil { - panic(err) + if uq.withNamedUserToEnvironment == nil { + uq.withNamedUserToEnvironment = make(map[string]*EnvironmentQuery) } - return v + uq.withNamedUserToEnvironment[name] = query + return uq } -// Bools returns list of bools from group-by. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) Bools(ctx context.Context) ([]bool, error) { - if len(ugb.fields) > 1 { - return nil, errors.New("ent: UserGroupBy.Bools is not achievable when grouping more than 1 field") - } - var v []bool - if err := ugb.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil +// UserGroupBy is the group-by builder for User entities. +type UserGroupBy struct { + selector + build *UserQuery } -// BoolsX is like Bools, but panics if an error occurs. -func (ugb *UserGroupBy) BoolsX(ctx context.Context) []bool { - v, err := ugb.Bools(ctx) - if err != nil { - panic(err) - } - return v +// Aggregate adds the given aggregation functions to the group-by query. +func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + ugb.fns = append(ugb.fns, fns...) + return ugb } -// Bool returns a single bool from a group-by query. -// It is only allowed when executing a group-by query with one field. -func (ugb *UserGroupBy) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = ugb.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserGroupBy.Bools returned %d results when one was expected", len(v)) +// Scan applies the selector query and scans the result into the given value. +func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ugb.build.ctx, "GroupBy") + if err := ugb.build.prepareQuery(ctx); err != nil { + return err } - return + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v) } -// BoolX is like Bool, but panics if an error occurs. -func (ugb *UserGroupBy) BoolX(ctx context.Context) bool { - v, err := ugb.Bool(ctx) - if err != nil { - panic(err) +func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ugb.fns)) + for _, fn := range ugb.fns { + aggregation = append(aggregation, fn(selector)) } - return v -} - -func (ugb *UserGroupBy) sqlScan(ctx context.Context, v interface{}) error { - for _, f := range ugb.fields { - if !user.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns)) + for _, f := range *ugb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := ugb.sqlQuery() + selector.GroupBy(selector.Columns(*ugb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := ugb.driver.Query(ctx, query, args, rows); err != nil { + if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (ugb *UserGroupBy) sqlQuery() *sql.Selector { - selector := ugb.sql.Select() - aggregation := make([]string, 0, len(ugb.fns)) - for _, fn := range ugb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(ugb.fields)+len(ugb.fns)) - for _, f := range ugb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(ugb.fields...)...) -} - // UserSelect is the builder for selecting fields of User entities. type UserSelect struct { *UserQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + us.fns = append(us.fns, fns...) + return us } // Scan applies the selector query and scans the result into the given value. -func (us *UserSelect) Scan(ctx context.Context, v interface{}) error { +func (us *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, us.ctx, "Select") if err := us.prepareQuery(ctx); err != nil { return err } - us.sql = us.UserQuery.sqlQuery(ctx) - return us.sqlScan(ctx, v) -} - -// ScanX is like Scan, but panics if an error occurs. -func (us *UserSelect) ScanX(ctx context.Context, v interface{}) { - if err := us.Scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (us *UserSelect) Strings(ctx context.Context) ([]string, error) { - if len(us.fields) > 1 { - return nil, errors.New("ent: UserSelect.Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := us.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (us *UserSelect) StringsX(ctx context.Context) []string { - v, err := us.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (us *UserSelect) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = us.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserSelect.Strings returned %d results when one was expected", len(v)) - } - return + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v) } -// StringX is like String, but panics if an error occurs. -func (us *UserSelect) StringX(ctx context.Context) string { - v, err := us.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. 
-func (us *UserSelect) Ints(ctx context.Context) ([]int, error) { - if len(us.fields) > 1 { - return nil, errors.New("ent: UserSelect.Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := us.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (us *UserSelect) IntsX(ctx context.Context) []int { - v, err := us.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (us *UserSelect) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = us.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserSelect.Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (us *UserSelect) IntX(ctx context.Context) int { - v, err := us.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (us *UserSelect) Float64s(ctx context.Context) ([]float64, error) { - if len(us.fields) > 1 { - return nil, errors.New("ent: UserSelect.Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := us.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (us *UserSelect) Float64sX(ctx context.Context) []float64 { - v, err := us.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. -func (us *UserSelect) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = us.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserSelect.Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (us *UserSelect) Float64X(ctx context.Context) float64 { - v, err := us.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (us *UserSelect) Bools(ctx context.Context) ([]bool, error) { - if len(us.fields) > 1 { - return nil, errors.New("ent: UserSelect.Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := us.Scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (us *UserSelect) BoolsX(ctx context.Context) []bool { - v, err := us.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
-func (us *UserSelect) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = us.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{user.Label} - default: - err = fmt.Errorf("ent: UserSelect.Bools returned %d results when one was expected", len(v)) +func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(us.fns)) + for _, fn := range us.fns { + aggregation = append(aggregation, fn(selector)) } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (us *UserSelect) BoolX(ctx context.Context) bool { - v, err := us.Bool(ctx) - if err != nil { - panic(err) + switch n := len(*us.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) } - return v -} - -func (us *UserSelect) sqlScan(ctx context.Context, v interface{}) error { rows := &sql.Rows{} - query, args := us.sql.Query() + query, args := selector.Query() if err := us.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/ent/user_update.go b/ent/user_update.go index 562b6eb9..a1c71bb3 100755 --- a/ent/user_update.go +++ b/ent/user_update.go @@ -1,4 +1,4 @@ -// Code generated by entc, DO NOT EDIT. +// Code generated by ent, DO NOT EDIT. package ent @@ -36,21 +36,53 @@ func (uu *UserUpdate) SetName(s string) *UserUpdate { return uu } +// SetNillableName sets the "name" field if the given value is not nil. +func (uu *UserUpdate) SetNillableName(s *string) *UserUpdate { + if s != nil { + uu.SetName(*s) + } + return uu +} + // SetUUID sets the "uuid" field. func (uu *UserUpdate) SetUUID(s string) *UserUpdate { uu.mutation.SetUUID(s) return uu } +// SetNillableUUID sets the "uuid" field if the given value is not nil. +func (uu *UserUpdate) SetNillableUUID(s *string) *UserUpdate { + if s != nil { + uu.SetUUID(*s) + } + return uu +} + // SetEmail sets the "email" field. func (uu *UserUpdate) SetEmail(s string) *UserUpdate { uu.mutation.SetEmail(s) return uu } -// SetHclID sets the "hcl_id" field. -func (uu *UserUpdate) SetHclID(s string) *UserUpdate { - uu.mutation.SetHclID(s) +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uu *UserUpdate) SetNillableEmail(s *string) *UserUpdate { + if s != nil { + uu.SetEmail(*s) + } + return uu +} + +// SetHCLID sets the "hcl_id" field. +func (uu *UserUpdate) SetHCLID(s string) *UserUpdate { + uu.mutation.SetHCLID(s) + return uu +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (uu *UserUpdate) SetNillableHCLID(s *string) *UserUpdate { + if s != nil { + uu.SetHCLID(*s) + } return uu } @@ -133,34 +165,7 @@ func (uu *UserUpdate) RemoveUserToEnvironment(e ...*Environment) *UserUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (uu *UserUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(uu.hooks) == 0 { - affected, err = uu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*UserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - uu.mutation = mutation - affected, err = uu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(uu.hooks) - 1; i >= 0; i-- { - if uu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = uu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, uu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -186,16 +191,7 @@ func (uu *UserUpdate) ExecX(ctx context.Context) { } func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: user.Table, - Columns: user.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) if ps := uu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -204,32 +200,16 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := uu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldName, - }) + _spec.SetField(user.FieldName, field.TypeString, value) } if value, ok := uu.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldUUID, - }) + _spec.SetField(user.FieldUUID, field.TypeString, value) } if value, ok := uu.mutation.Email(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldEmail, - }) - } - if value, ok := uu.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldHclID, - }) + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := uu.mutation.HCLID(); ok { + _spec.SetField(user.FieldHCLID, field.TypeString, value) } if uu.mutation.UserToTagCleared() { edge := &sqlgraph.EdgeSpec{ @@ -239,10 +219,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.UserToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -255,10 +232,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.UserToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -274,10 +248,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.UserToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - 
IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -293,10 +264,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -309,10 +277,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -328,10 +293,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -343,10 +305,11 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{user.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return 0, err } + uu.mutation.done = true return n, nil } @@ -364,21 +327,53 @@ func (uuo *UserUpdateOne) SetName(s string) *UserUpdateOne { return uuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableName(s *string) *UserUpdateOne { + if s != nil { + uuo.SetName(*s) + } + return uuo +} + // SetUUID sets the "uuid" field. func (uuo *UserUpdateOne) SetUUID(s string) *UserUpdateOne { uuo.mutation.SetUUID(s) return uuo } +// SetNillableUUID sets the "uuid" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableUUID(s *string) *UserUpdateOne { + if s != nil { + uuo.SetUUID(*s) + } + return uuo +} + // SetEmail sets the "email" field. func (uuo *UserUpdateOne) SetEmail(s string) *UserUpdateOne { uuo.mutation.SetEmail(s) return uuo } -// SetHclID sets the "hcl_id" field. -func (uuo *UserUpdateOne) SetHclID(s string) *UserUpdateOne { - uuo.mutation.SetHclID(s) +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableEmail(s *string) *UserUpdateOne { + if s != nil { + uuo.SetEmail(*s) + } + return uuo +} + +// SetHCLID sets the "hcl_id" field. +func (uuo *UserUpdateOne) SetHCLID(s string) *UserUpdateOne { + uuo.mutation.SetHCLID(s) + return uuo +} + +// SetNillableHCLID sets the "hcl_id" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableHCLID(s *string) *UserUpdateOne { + if s != nil { + uuo.SetHCLID(*s) + } return uuo } @@ -459,6 +454,12 @@ func (uuo *UserUpdateOne) RemoveUserToEnvironment(e ...*Environment) *UserUpdate return uuo.RemoveUserToEnvironmentIDs(ids...) } +// Where appends a list predicates to the UserUpdate builder. +func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + uuo.mutation.Where(ps...) + return uuo +} + // Select allows selecting one or more fields (columns) of the returned entity. 
// The default is selecting all fields defined in the entity schema. func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { @@ -468,34 +469,7 @@ func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne // Save executes the query and returns the updated User entity. func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { - var ( - err error - node *User - ) - if len(uuo.hooks) == 0 { - node, err = uuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*UserMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - uuo.mutation = mutation - node, err = uuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(uuo.hooks) - 1; i >= 0; i-- { - if uuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = uuo.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, uuo.mutation); err != nil { - return nil, err - } - } - return node, err + return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -521,16 +495,7 @@ func (uuo *UserUpdateOne) ExecX(ctx context.Context) { } func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: user.Table, - Columns: user.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) id, ok := uuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} @@ -556,32 +521,16 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) } } if value, ok := uuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldName, - }) + _spec.SetField(user.FieldName, field.TypeString, value) } if value, ok := uuo.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldUUID, - }) + _spec.SetField(user.FieldUUID, field.TypeString, value) } if value, ok := uuo.mutation.Email(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldEmail, - }) - } - if value, ok := uuo.mutation.HclID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: user.FieldHclID, - }) + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := uuo.mutation.HCLID(); ok { + _spec.SetField(user.FieldHCLID, field.TypeString, value) } if uuo.mutation.UserToTagCleared() { edge := &sqlgraph.EdgeSpec{ @@ -591,10 +540,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.UserToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -607,10 +553,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.UserToTagColumn}, Bidi: false, Target: 
&sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -626,10 +569,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.UserToTagColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: tag.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(tag.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -645,10 +585,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -661,10 +598,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -680,10 +614,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: user.UserToEnvironmentPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: environment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(environment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -698,9 +629,10 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{user.Label} } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{err.Error(), err} + err = &ConstraintError{msg: err.Error(), wrap: err} } return nil, err } + uuo.mutation.done = true return _node, nil } diff --git a/go.mod b/go.mod index 8885c9f8..f7f2369b 100755 --- a/go.mod +++ b/go.mod @@ -1,145 +1,162 @@ module github.com/gen0cide/laforge -go 1.18 +go 1.21 require ( - entgo.io/contrib v0.2.0 - entgo.io/ent v0.10.1 - github.com/99designs/gqlgen v0.17.10 - github.com/apenella/go-ansible v1.1.5 - github.com/aws/aws-sdk-go-v2 v1.16.2 - github.com/aws/aws-sdk-go-v2/config v1.15.3 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.34.0 + entgo.io/contrib v0.4.5 + entgo.io/ent v0.12.5 + github.com/99designs/gqlgen v0.17.41 + github.com/apenella/go-ansible v1.2.2 + github.com/aws/aws-sdk-go-v2 v1.24.0 + github.com/aws/aws-sdk-go-v2/config v1.26.2 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 github.com/briandowns/formatifier v0.0.0-20150226010542-6d068a6ce63a github.com/davecgh/go-spew v1.1.1 github.com/dgrijalva/jwt-go v3.2.0+incompatible - github.com/fatih/color v1.13.0 + github.com/fatih/color v1.16.0 github.com/ghodss/yaml v1.0.0 - github.com/gin-contrib/cors v1.3.1 - github.com/gin-gonic/gin v1.7.1 - github.com/go-git/go-git/v5 v5.4.2 - github.com/go-redis/redis/v8 v8.11.0 - github.com/google/uuid v1.3.0 - github.com/gophercloud/gophercloud v0.24.0 - github.com/gorilla/sessions v1.2.1 - github.com/gorilla/websocket v1.5.0 + github.com/gin-contrib/cors v1.5.0 + github.com/gin-gonic/gin v1.9.1 + github.com/go-git/go-git/v5 v5.11.0 + 
github.com/go-redis/redis/v8 v8.11.5 + github.com/google/uuid v1.5.0 + github.com/gophercloud/gophercloud v1.8.0 + github.com/gorilla/sessions v1.2.2 + github.com/gorilla/websocket v1.5.1 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/hcl/v2 v2.11.1 - github.com/hedwigz/entviz v0.0.0-20220529060928-44574cfd7a21 - github.com/iamacarpet/go-win64api v0.0.0-20210311141720-fe38760bed28 - github.com/iancoleman/strcase v0.2.0 - github.com/jackc/pgx/v4 v4.11.0 - github.com/kardianos/service v1.2.0 - github.com/markbates/goth v1.67.1 - github.com/mattn/go-sqlite3 v1.14.10 - github.com/mattn/go-zglob v0.0.3 + github.com/hashicorp/hcl/v2 v2.19.1 + github.com/hedwigz/entviz v0.0.0-20221011080911-9d47f6f1d818 + github.com/iamacarpet/go-win64api v0.0.0-20230324134531-ef6dbdd6db97 + github.com/iancoleman/strcase v0.3.0 + github.com/jackc/pgx/v4 v4.18.1 + github.com/kardianos/service v1.2.2 + github.com/markbates/goth v1.78.0 + github.com/mattn/go-sqlite3 v1.14.19 + github.com/mattn/go-zglob v0.0.4 github.com/mholt/archiver v3.1.1+incompatible github.com/mholt/archiver/v3 v3.5.1 - github.com/shirou/gopsutil v3.21.4+incompatible - github.com/sirupsen/logrus v1.8.1 - github.com/vektah/gqlparser/v2 v2.4.5 - github.com/vmihailenco/msgpack/v5 v5.0.0-beta.9 - github.com/vmware/govmomi v0.25.0 - github.com/zclconf/go-cty v1.8.3 - golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 - golang.org/x/net v0.0.0-20220401154927-543a649e0bdd - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.42.0 - google.golang.org/protobuf v1.28.0 + github.com/shirou/gopsutil v3.21.11+incompatible + github.com/sirupsen/logrus v1.9.3 + github.com/vektah/gqlparser/v2 v2.5.10 + github.com/vmware/govmomi v0.34.1 + github.com/zclconf/go-cty v1.14.1 + golang.org/x/crypto v0.17.0 + golang.org/x/net v0.19.0 + golang.org/x/sync v0.5.0 + google.golang.org/grpc v1.60.1 + google.golang.org/protobuf v1.31.0 gopkg.in/guregu/null.v4 v4.0.0 ) require ( - ariga.io/atlas v0.3.7-0.20220303204946-787354f533c3 // indirect - github.com/Microsoft/go-winio v0.4.16 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect - github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect - github.com/acomagu/bufpipe v1.0.3 // indirect + ariga.io/atlas v0.16.0 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apenella/go-common-utils/data v0.0.0-20210528133155-34ba915e28c8 // indirect - github.com/apenella/go-common-utils/error v0.0.0-20210528133155-34ba915e28c8 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 // indirect - github.com/aws/smithy-go v1.11.2 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - 
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/andybalholm/brotli v1.0.6 // indirect + github.com/apenella/go-common-utils/data v0.0.0-20221227202648-5452d804e940 // indirect + github.com/apenella/go-common-utils/error v0.0.0-20221227202648-5452d804e940 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 // indirect + github.com/aws/smithy-go v1.19.0 // indirect + github.com/bytedance/sonic v1.10.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/cloudflare/circl v1.3.6 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect - github.com/emirpasic/gods v1.12.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect github.com/frankban/quicktest v1.13.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.3.1 // indirect - github.com/go-ole/go-ole v1.2.5 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/inflect v0.19.0 // indirect - github.com/go-playground/locales v0.13.0 // indirect - github.com/go-playground/universal-translator v0.17.0 // indirect - github.com/go-playground/validator/v10 v10.6.1 // indirect - github.com/gofrs/uuid v4.0.0+incompatible // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.2 // indirect - github.com/google/go-cmp v0.5.7 // indirect - github.com/gorilla/mux v1.7.3 // indirect - github.com/gorilla/securecookie v1.1.1 // indirect - github.com/graphql-go/graphql v0.7.10-0.20210411022516-8a92e977c10b // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.16.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/cabbie v1.0.5 // indirect + github.com/google/glazier v0.0.0-20231213170836-8063f886cbd6 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/imdario/mergo 
v0.3.12 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.8.1 // indirect + github.com/jackc/pgconn v1.14.1 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.0.7 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.7.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgtype v1.14.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect - github.com/klauspost/compress v1.11.4 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leodido/go-urn v1.2.1 // indirect - github.com/matryer/moq v0.2.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/nwaples/rardecode v1.1.0 // indirect - github.com/pierrec/lz4 v2.6.0+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.2 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sergi/go-diff v1.1.0 // indirect - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect - github.com/ugorji/go/codec v1.2.5 // indirect - github.com/ulikunitz/xz v0.5.10 // indirect - github.com/urfave/cli/v2 v2.8.1 // indirect - github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/xanzy/ssh-agent v0.3.0 // indirect + github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect + github.com/nwaples/rardecode v1.1.3 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.19 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/scjalliance/comshim v0.0.0-20231116235529-bbacf79a4691 // indirect + github.com/sergi/go-diff v1.3.1 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect + github.com/sosodev/duration v1.2.0 // indirect + github.com/stretchr/objx v0.5.1 // indirect + github.com/stretchr/testify v1.8.4 // indirect + github.com/tklauser/go-sysconf v0.3.13 // indirect + github.com/tklauser/numcpus v0.7.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + 
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect - github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect - golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.10 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + golang.org/x/arch v0.6.0 // indirect + golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.16.1 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect + gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 770ad7b1..ec69c19d 100755 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ -ariga.io/atlas v0.3.7-0.20220303204946-787354f533c3 h1:fjG4oFCQEfGrRi0QoxWcH2OO28CE6VYa6DkIr3yDySU= ariga.io/atlas v0.3.7-0.20220303204946-787354f533c3/go.mod h1:yWGf4VPiD4SW83+kAqzD624txN9VKoJC+bpVXr2pKJA= +ariga.io/atlas v0.16.0 h1:3MKpTTaI1XAe3b2EnUBiS6Z/73QazcrwynLOYsKNN5E= +ariga.io/atlas v0.16.0/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -16,6 +18,20 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.67.0/go.mod h1:YNan/mUhNZFrYUor0vqrsQ0Ffl7Xtm/ACOy/vsTS858= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= 
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -24,7 +40,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -34,48 +50,37 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -entgo.io/contrib v0.2.0 h1:GWYFtP2heR9wuCE9NiXVDlIH/6mKdxJKWJfTth6dcE4= -entgo.io/contrib v0.2.0/go.mod h1:BMOqlqo47WBJ4NReoShECVtbHNzNeVvgVr6xV/1Cogo= -entgo.io/ent v0.9.1/go.mod h1:6NUeTfUN5mp5YN+5tgoH1SlakSvYPTBOYotSOvaI4ak= -entgo.io/ent v0.9.2-0.20210821141344-368a8f7a2e9a/go.mod h1:lYe2Pg6DmqA5f7kNzTdRm2q3HDAvMuHR/tPm1mIPy2Q= -entgo.io/ent v0.10.1 h1:dM5h4Zk6yHGIgw4dCqVzGw3nWgpGYJiV4/kyHEF6PFo= +entgo.io/contrib v0.4.5 h1:BFaOHwFLE8WZjVJadP0XHCIaxgcC1BAtUvAyw7M/GHk= +entgo.io/contrib v0.4.5/go.mod h1:wpZyq2DJgthugFvDBlaqMXj9mV4/9ebyGEn7xlTVQqE= entgo.io/ent v0.10.1/go.mod h1:YPgxeLnoQ/YdpVORRtqjBF+wCy9NX9IR7veTv3Bffus= -github.com/99designs/gqlgen v0.14.0/go.mod h1:S7z4boV+Nx4VvzMUpVrY/YuHjFX4n7rDyuTqvAkuoRE= -github.com/99designs/gqlgen v0.17.10 h1:+JtGPZ6jqL0IcmLopq4iaEbh5Ggye+NiutU57w82xvk= -github.com/99designs/gqlgen v0.17.10/go.mod h1:tjgUrZGpynt+w38zmgTn5QGgd3EUhkHa4VRcX6/AyGo= -github.com/AlekSi/pointer v1.1.0/go.mod h1:y7BvfRI3wXPWKXEBhU71nbnIEEZX0QTSB2Bj48UJIZE= +entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= +entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= +github.com/99designs/gqlgen v0.17.41 h1:C1/zYMhGVP5TWNCNpmZ9Mb6CqT1Vr5SHEWoTOEJ3v3I= +github.com/99designs/gqlgen v0.17.41/go.mod h1:GQ6SyMhwFbgHR0a8r2Wn8fYgEwPxxmndLFPhU63+cJE= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= -github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE= +github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= +github.com/StackExchange/wmi v1.2.0/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= 
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/alecthomas/kong v0.2.11/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -83,211 +88,228 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= +github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andybalholm/cascadia v1.3.1 h1:nhxRkql1kdYCc8Snf7D5/D3spOX+dBgjA6u8x004T2c= +github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apenella/go-ansible v1.1.5 h1:kiAS3I9rgq1D6nBxuE4db/bhBNZO9Qy5bIsCHacOdJU= -github.com/apenella/go-ansible v1.1.5/go.mod h1:RfpzSuv8Cbb2HvcdjPhzXtxUGlsE3cD+kTb+IeKXYfo= -github.com/apenella/go-common-utils/data v0.0.0-20210528133155-34ba915e28c8 h1:bjcIpzMcDycgqE1C8rktB04QEOJD3+qKLE5vnBeJlZo= -github.com/apenella/go-common-utils/data v0.0.0-20210528133155-34ba915e28c8/go.mod h1:pOb2o2/kk9IwfdAZ36n58dYAc5k8nzBJkwacgLDwpoM= -github.com/apenella/go-common-utils/error v0.0.0-20210528133155-34ba915e28c8 h1:2u17yc+aQJwDHRqnmnJd3arxcGcatJ/0eCFJtq45suc= -github.com/apenella/go-common-utils/error v0.0.0-20210528133155-34ba915e28c8/go.mod h1:Hj3S/BcSHKfv9VDMcrY7lsm9hGnb7cd70alSkl/Sv+4= +github.com/apenella/go-ansible v1.2.2 h1:tfNTLbYR9DUDqiFdNjG6AcExd3+WX2EQafZ9cbUiO8k= +github.com/apenella/go-ansible v1.2.2/go.mod h1:qiLWolQqLkCoqFlPvdsKhxk7O0qjIBCPNhUKpH/vjaU= +github.com/apenella/go-common-utils/data v0.0.0-20221227202648-5452d804e940 h1:heIU8S8TKSbqebsLywfM4n8pvQtqjDZHU6gVPbzrQHQ= +github.com/apenella/go-common-utils/data v0.0.0-20221227202648-5452d804e940/go.mod h1:cLVL6GjUiKG/WyBzX+KD6h/XRV/HnNZIZbMNNiBgQ9o= +github.com/apenella/go-common-utils/error v0.0.0-20221227202648-5452d804e940 h1:M6LTqQBjGqTf9t0O2i0GunjhlsX4REK8aSS44sGOEv4= +github.com/apenella/go-common-utils/error 
v0.0.0-20221227202648-5452d804e940/go.mod h1:+3dyIlHX350xJIUIffwMLswZXU+N2FwDE05VuKqxYdw= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= -github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= -github.com/aws/aws-sdk-go-v2/config v1.15.3 h1:5AlQD0jhVXlGzwo+VORKiUuogkG7pQcLJNzIzK7eodw= -github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= -github.com/aws/aws-sdk-go-v2/credentials v1.11.2 h1:RQQ5fzclAKJyY5TvF+fkjJEwzK4hnxQCLOu5JXzDmQo= -github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 h1:LWPg5zjHV9oz/myQr4wMs0gi4CjnDN/ILmyZUFYXZsU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 h1:by9P+oy3P/CwggN4ClnW2D4oL91QV7pBzBICi1chZvQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= 
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.34.0 h1:dfWleW7/a3+TR6qJynYZsaovCEStQOep5x+BxkiBDhc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.34.0/go.mod h1:37MWOQMGyj8lcranOwo716OHvJgeFJUOaWu6vk1pWNE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 h1:frW4ikGcxfAEDfmQqWgMLp+F1n4nRo9sF39OcIb5BkQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= -github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= -github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= +github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/config v1.26.2 h1:+RWLEIWQIGgrz2pBPAUoGgNGs1TOyF4Hml7hCnYj2jc= +github.com/aws/aws-sdk-go-v2/config v1.26.2/go.mod h1:l6xqvUxt0Oj7PI/SUXYLNyZ9T/yBPn3YTQcJLLOdtR8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13 h1:WLABQ4Cp4vXtXfOWOS3MEZKr6AAYUpMczLhgKtAjQ/8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13/go.mod h1:Qg6x82FXwW0sJHzYruxGiuApNo31UEtJvXVSZAXeWiw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 h1:VrFC1uEZjX4ghkm/et8ATVGb1mT75Iv8aPKPjUE+F8A= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod 
h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 h1:HJeiuZ2fldpd0WqngyMR6KW7ofkXNLyOaHwEIGm39Cs= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= github.com/briandowns/formatifier v0.0.0-20150226010542-6d068a6ce63a h1:35aRLEEdnaAaK4Ksy23WkbXXk0jyXamuIdmLM2Q9kmA= github.com/briandowns/formatifier v0.0.0-20150226010542-6d068a6ce63a/go.mod h1:ZtAFlT/PBrBDr5bP6SFBxz/U9OE1ebPYRLW6L6jgkp8= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE= +github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/capnspacehook/taskmaster v0.0.0-20210519235353-1629df7c85e9/go.mod h1:257CYs3Wd/CTlLQ3c72jKv+fFE2MV3WPNnV5jiroYUU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0/go.mod 
h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= +github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.6 h1:/xbKIqSHbZXHwkhbrhrt2YOHIwYJlXH94E3tI/gDlUg= +github.com/cloudflare/circl v1.3.6/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg 
v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/staticfile v0.1.3/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod 
h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod 
h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA= -github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk= +github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk= +github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/gin-gonic/gin v1.7.1 h1:qC89GU3p8TvKWMAVhEpmpB2CIb1hnqt2UdKZaP93mS8= -github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368/go.mod h1:7xCgX1lzlrXPHkfvn3EhumqHkmSlzt8at9q7v0ax19c= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.6.1 h1:W6TRDXt4WcWp4c4nf/G+6BkGdhiIo0k417gfr+V6u4I= -github.com/go-playground/validator/v10 v10.6.1/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk= -github.com/go-redis/redis/v8 v8.11.0 h1:O1Td0mQ8UFChQ3N9zFQqo6kTU2cJ+/it88gDB+zg0wo= -github.com/go-redis/redis/v8 v8.11.0/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.1-0.20200311113236-681ffa848bae/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= 
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.16.0 h1:x+plE831WK4vaKHO/jpgUGsvLKIqRRkz6M78GuJAfGE= +github.com/go-playground/validator/v10 v10.16.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/goccy/go-json v0.9.6/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -295,6 +317,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -311,14 +335,24 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/addlicense v1.0.0/go.mod h1:Sm/DHu7Jk+T5miFHHehdIjbi4M5+dJDRS3Cq0rncIxA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/aukera v0.0.0-20201117230544-d145c8357fea/go.mod h1:oXqTZORBzdwQ6L32YjJmaPajqIV/hoGEouwpFMf4cJE= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cabbie v1.0.2/go.mod h1:6MmHaUrgfabehCHAIaxdrbmvHSxUVXj3Abs08FMABSo= +github.com/google/cabbie v1.0.5 h1:j+JWBiMpzJCTkVLKrzsNBQLkRff55sjzXc0AQOTV2JU= +github.com/google/cabbie v1.0.5/go.mod h1:WytqVAbQee3vvDZQSROF6ZsPGrUsmpot9tKNtxnr/lk= +github.com/google/glazier v0.0.0-20210617205946-bf91b619f5d4/go.mod h1:g7oyIhindbeebnBh0hbFua5rv6XUt/nweDwIWdvxirg= +github.com/google/glazier v0.0.0-20211029225403-9f766cca891d/go.mod h1:h2R3DLUecGbLSyi6CcxBs5bdgtJhgK+lIffglvAcGKg= +github.com/google/glazier v0.0.0-20231213170836-8063f886cbd6 h1:qjmtTOed3lENJzDzn7gcOEObUtLOp2MDr+RiEExIcA0= +github.com/google/glazier v0.0.0-20231213170836-8063f886cbd6/go.mod h1:g/uB9yDfCoH08pu7+Isrzaeh35vCvAY0SUFEJCJ1+5I= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -328,13 +362,20 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/logger v1.1.0/go.mod h1:w7O8nrRr0xufejBlQMI83MXqRusvREoJdaAxV+CoAB4= +github.com/google/logger v1.1.1/go.mod h1:BkeJZ+1FhQ+/d087r4dzojEg1u2ZX+ZqG1jTUrLM+zQ= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -343,86 +384,93 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/subcommands 
v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/winops v0.0.0-20210803215038-c8511b84de2b/go.mod h1:ShbX8v8clPm/3chw9zHVwtW3QhrFpL8mXOwNxClt4pg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gophercloud/gophercloud v0.24.0 h1:jDsIMGJ1KZpAjYfQgGI2coNQj5Q83oPzuiGJRFWgMzw= -github.com/gophercloud/gophercloud v0.24.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.1.1/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= -github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY= +github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= github.com/gorilla/websocket 
v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/graphql-go/graphql v0.7.10-0.20210411022516-8a92e977c10b h1:pFOI7cDz2wI+MwaoDqqrhFCXkwvpvkWpYQCXvQVAlfs= -github.com/graphql-go/graphql v0.7.10-0.20210411022516-8a92e977c10b/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/groob/plist v0.0.0-20210519001750-9f754062e6d6/go.mod h1:itkABA+w2cw7x5nYUS/pLRef6ludkZKOigbROmCTaFw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.11.1 h1:yTyWcXcm9XB0TEkyU/JCRU6rYy4K+mgLtzn2wlrJbcc= -github.com/hashicorp/hcl/v2 v2.11.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.10.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= +github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hedwigz/entviz v0.0.0-20220529060928-44574cfd7a21 h1:OLaWKKM5Y0LB6Z0VhI9TmIJZk4ZX1G6cJkFlYKC6XMw= -github.com/hedwigz/entviz v0.0.0-20220529060928-44574cfd7a21/go.mod h1:eBRCW7Hyg6ZjOclls554VWeO/E7b5KQD3eO0Srxb2ZY= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hedwigz/entviz v0.0.0-20221011080911-9d47f6f1d818 h1:IIK1IY/v9aqVpaNXTr/rkoEnOc4rH3KbPYZYZLP3N04= +github.com/hedwigz/entviz v0.0.0-20221011080911-9d47f6f1d818/go.mod h1:B8PUHBD/X0sZZqUX9c+YntJRDBMg5EwQCpyMHoFy5rs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/iamacarpet/go-win64api v0.0.0-20210311141720-fe38760bed28 h1:QhDPvIcXXFltItF7kQ2Go4frViywCx9xDl2okzLNt+A= github.com/iamacarpet/go-win64api v0.0.0-20210311141720-fe38760bed28/go.mod h1:oGJx9dz0Ny7HC7U55RZ0Smd6N9p3hXP/+hOFtuYrAxM= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iamacarpet/go-win64api 
v0.0.0-20230324134531-ef6dbdd6db97 h1:VjwKCN2PMLlMKM2k9AW8QQsfmEH43ldlX+JGeWW9cEE= +github.com/iamacarpet/go-win64api v0.0.0-20230324134531-ef6dbdd6db97/go.mod h1:B7zFQPAznj+ujXel5X+LUoK3LgY6VboCdVYHZNn7gpg= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -430,15 +478,18 @@ github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgO github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.1 h1:ySBX7Q87vOMqKU2bbmKbUvtYhauDFclYbNDYIE1/h6s= -github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4= +github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod 
h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= @@ -446,77 +497,73 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.7 h1:6Pwi1b3QdY65cuv6SyVO0FgPd5J3Bl7wf/nQQjinHMA= -github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.7.0 h1:6f4kVsW01QftE38ufBYxKciO6gyioXSC0ABIRLcZrGs= -github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod 
h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.11.0 h1:J86tSWd3Y7nKjwT/43xZBvpi04keQWx8gNC2YkdJhZI= -github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.10.1/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kardianos/service v1.2.0 h1:bGuZ/epo3vrt8IPC7mnKQolqFeYJb7Cs8Rk4PSOBB/g= -github.com/kardianos/service v1.2.0/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60= +github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.4 h1:kz40R/YWls3iqT9zX9AHN3WoVsrAWVyui5sxuLqiXqU= github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -525,82 +572,71 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/lestrrat-go/jwx v0.9.0/go.mod h1:iEoxlYfZjvoGpuWwxUz+eR5e6KTJGsaRcy/YNA/UnBk= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= +github.com/lestrrat-go/httpcc v1.0.0/go.mod h1:tGS/u00Vh5N6FHNkExqGGNId8e0Big+++0Gf8MBnAvE= +github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/jwx v1.2.21/go.mod h1:9cfxnOH7G1gN75CaJP2hKGcxFEx5sPh1abRIA/ZJVh4= +github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/going v1.0.0/go.mod 
h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA= -github.com/markbates/goth v1.67.1 h1:gU5B0pzHVyhnJPwGynfFnkfvaQ39C1Sy+ewdl+bhAOw= -github.com/markbates/goth v1.67.1/go.mod h1:EyLFHGU5ySr2GXRDyJH5nu2dA7parbC8QwIYW/rGcWg= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/matryer/moq v0.2.7 h1:RtpiPUM8L7ZSCbSwK+QcZH/E9tgqAkFjKQxsRs25b4w= -github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk= +github.com/markbates/goth v1.78.0 h1:7VEIFDycJp9deyVv3YraGBPdD0ZYQW93Y3Aw1eVP3BY= +github.com/markbates/goth v1.78.0/go.mod h1:X6xdNgpapSENS0O35iTBBcMHoJDQDfI9bJl+APCkYMc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= 
-github.com/mattn/go-zglob v0.0.3 h1:6Ry4EYsScDyt5di4OI6xw1bYhOqfE5S33Z1OPy+d+To= -github.com/mattn/go-zglob v0.0.3/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM= +github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU= github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo= github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -610,214 +646,176 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= -github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc= +github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.16.5 
h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= -github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.19 h1:tYLzDnjDXh9qIxSTKHwXwOYmm9d887Y7Y1ZkyXYHAN4= +github.com/pierrec/lz4/v4 
v4.1.19/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/tsdb 
v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rickb777/date v1.14.2/go.mod h1:swmf05C+hN+m8/Xh7gEq3uB6QJDNc5pQBWojKdHetOs= +github.com/rickb777/plural v1.2.2/go.mod h1:xyHbelv4YvJE51gjMnHvk+U2e9zIysg6lTnSQK8XUYA= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scjalliance/comshim v0.0.0-20190308082608-cf06d2532c4e/go.mod h1:9Tc1SKnfACJb9N7cw2eyuI6xzy845G7uZONBsi5uPEA= +github.com/scjalliance/comshim v0.0.0-20231116235529-bbacf79a4691 h1:P+tjDAuIC11VabBunlN30hVNqqXAmU+xEyrTEFU8avY= +github.com/scjalliance/comshim v0.0.0-20231116235529-bbacf79a4691/go.mod h1:frmTThEHn5H+hHqLPGBDKVlFLpE8f/4vY2M9od2tW9k= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil v3.21.4+incompatible h1:fuHcTm5mX+wzo542cmYcV9RTGQLbnHLI5SyQ5ryTVck= -github.com/shirou/gopsutil v3.21.4+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= -github.com/shopspring/decimal 
v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sosedoff/ansible-vault-go v0.2.0 h1:XqkBdqbXgTuFQ++NdrZvSdUTNozeb6S3V5x7FVs17vg= +github.com/sosedoff/ansible-vault-go v0.2.0/go.mod h1:wMU54HNJfY0n0KIgbpA9m15NBfaUDlJrAsaZp0FwzkI= +github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us= +github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= +github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= -github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.7/go.mod 
h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go v1.2.5/go.mod h1:gat2tIT8KJG8TVI8yv77nEO/KYT6dV7JE1gfUa8Xuls= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.5 h1:8WobZKAk18Msm2CothY2jnztY56YVY8kF1oQrj21iis= -github.com/ugorji/go/codec v1.2.5/go.mod h1:QPxoTbPKSEAlAHPYt02++xp/en9B/wUdwFCz+hj5caA= +github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= +github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= +github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4= +github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4= -github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= -github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= -github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= -github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc= -github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/vektah/gqlparser/v2 v2.5.10 h1:6zSM4azXC9u4Nxy5YmdmGu4uKamfwsdKTwp5zsEealU= +github.com/vektah/gqlparser/v2 v2.5.10/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.9 h1:iBRIniTnWOo0kqkg3k3XR8Vn6OCkVlIuZNo0UoBrKx4= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.9/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= 
-github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmware/govmomi v0.25.0 h1:EOlLkucY7mxLTDt9xUektZWlKu3TfmoCdYt/Pb6RFxw= -github.com/vmware/govmomi v0.25.0/go.mod h1:bi7jKMEW2kgT/dO5LrVPsVjS3apHGz0sDmY3ADSxHRQ= -github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/vmware/govmomi v0.34.1 h1:Hqu2Uke2itC+cNoIcFQBLEZvX9wBRTTOP04J7V1fqRw= +github.com/vmware/govmomi v0.34.1/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.8.3 h1:48gwZXrdSADU2UW9eZKHprxAI7APZGW9XmExpJpSjT0= -github.com/zclconf/go-cty v1.8.3/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= +github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io 
v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -825,36 +823,42 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.6.0 h1:S0JTfE48HbRj80+4tbvZDYsJ3tGv6BUU3XxyZ7CirAc= +golang.org/x/arch v0.6.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o= -golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -865,6 +869,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 
h1:+iq7lrkxmFNBM7xx+Rae2W6uyPfhPeDWD+n+JgppptE= +golang.org/x/exp v0.0.0-20231219180239-dc181d75b848/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -877,6 +883,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -885,20 +893,22 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -910,6 +920,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -926,25 +937,46 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200930145003-4acb6c075d10/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220401154927-543a649e0bdd h1:zYlwaUHTmxuf6H7hwO2dgwqozQmH7zf4x+/qql4oVWc= -golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -954,14 +986,17 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -977,19 +1012,21 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1006,64 +1043,93 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211113001501-0c823b97ae02/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod 
h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1086,30 +1152,35 @@ golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.9-0.20211216111533-8d383106f7e7/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1127,23 +1198,38 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api 
v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1163,29 +1249,53 @@ google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200929141702-51c3e5b607fe/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb h1:hcskBH5qZCOa7WpTUFUFvoebnSFZBYpjykLtjIp9DVk= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= 
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= @@ -1195,9 +1305,21 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1205,64 +1327,54 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 
v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2 h1:MZF6J7CV6s/h0HBkfqebrYfKCVEo5iN+wzE4QhV3Evo= +gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2/go.mod h1:s1Sn2yZos05Qfs7NKt867Xe18emOmtsO3eAKbDaon0o= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
diff --git a/graphql/graph/generated/generated.go b/graphql/graph/generated/generated.go
index 8bd71499..7daf10b5 100644
--- a/graphql/graph/generated/generated.go
+++ b/graphql/graph/generated/generated.go
@@ -26,6 +26,7 @@ import (
// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { return &executableSchema{ + schema: cfg.Schema, resolvers: cfg.Resolvers, directives: cfg.Directives, complexity: cfg.Complexity, @@ -33,6 +34,7 @@ func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { } type Config struct { + Schema *ast.Schema Resolvers ResolverRoot Directives DirectiveRoot Complexity ComplexityRoot @@ -157,7 +159,7 @@ type ComplexityRoot struct { Cooldown func(childComplexity int) int Description func(childComplexity int) int Disabled func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int IgnoreErrors func(childComplexity int) int Name func(childComplexity int) int @@ -172,7 +174,7 @@ type ComplexityRoot struct { CompetitionToDNS func(childComplexity int) int CompetitionToEnvironment func(childComplexity int) int Config func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int RootPassword func(childComplexity int) int Tags func(childComplexity int) int @@ -183,7 +185,7 @@ type ComplexityRoot struct { DNSServers func(childComplexity int) int DNSToCompetition func(childComplexity int) int DNSToEnvironment func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int NtpServers func(childComplexity int) int RootDomain func(childComplexity int) int @@ -193,7 +195,7 @@ type ComplexityRoot struct { DNSRecord struct { DNSRecordToEnvironment func(childComplexity int) int Disabled func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int Name func(childComplexity int) int Tags func(childComplexity int) int @@ -230,7 +232,7 @@ type ComplexityRoot struct { EnvironmentToServerTask func(childComplexity int) int EnvironmentToUser func(childComplexity int) int ExposedVdiPorts func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int Name func(childComplexity int) int Revision func(childComplexity int) int @@ -240,7 +242,7 @@ type ComplexityRoot struct { FileDelete struct { FileDeleteToEnvironment func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int Path func(childComplexity int) int Tags func(childComplexity int) int @@ -251,7 +253,7 @@ type ComplexityRoot struct { Destination func(childComplexity int) int Disabled func(childComplexity int) int FileDownloadToEnvironment func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int Md5 func(childComplexity int) int Perms func(childComplexity int) int @@ -264,7 +266,7 @@ type ComplexityRoot struct { FileExtract struct { Destination func(childComplexity int) int FileExtractToEnvironment func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int Source func(childComplexity int) int Tags func(childComplexity int) int @@ -287,7 +289,7 @@ type ComplexityRoot struct { Description func(childComplexity int) int ExposedTCPPorts func(childComplexity int) int ExposedUDPPorts func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int HostToDisk func(childComplexity int) int HostToEnvironment 
func(childComplexity int) int Hostname func(childComplexity int) int @@ -307,7 +309,7 @@ type ComplexityRoot struct { Description func(childComplexity int) int Email func(childComplexity int) int FirstName func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int IdentityToEnvironment func(childComplexity int) int LastName func(childComplexity int) int @@ -347,7 +349,7 @@ type ComplexityRoot struct { Network struct { Cidr func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int Name func(childComplexity int) int NetworkToEnvironment func(childComplexity int) int @@ -489,7 +491,7 @@ type ComplexityRoot struct { Cooldown func(childComplexity int) int Description func(childComplexity int) int Disabled func(childComplexity int) int - HclID func(childComplexity int) int + HCLID func(childComplexity int) int ID func(childComplexity int) int IgnoreErrors func(childComplexity int) int Language func(childComplexity int) int @@ -791,17 +793,21 @@ type UserResolver interface { } type executableSchema struct { + schema *ast.Schema resolvers ResolverRoot directives DirectiveRoot complexity ComplexityRoot } func (e *executableSchema) Schema() *ast.Schema { + if e.schema != nil { + return e.schema + } return parsedSchema } func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) { - ec := executionContext{nil, e} + ec := executionContext{nil, e, 0, 0, nil} _ = ec switch typeName + "." + field { @@ -1233,11 +1239,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Command.Disabled(childComplexity), true case "Command.hcl_id": - if e.complexity.Command.HclID == nil { + if e.complexity.Command.HCLID == nil { break } - return e.complexity.Command.HclID(childComplexity), true + return e.complexity.Command.HCLID(childComplexity), true case "Command.id": if e.complexity.Command.ID == nil { @@ -1317,11 +1323,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Competition.Config(childComplexity), true case "Competition.hcl_id": - if e.complexity.Competition.HclID == nil { + if e.complexity.Competition.HCLID == nil { break } - return e.complexity.Competition.HclID(childComplexity), true + return e.complexity.Competition.HCLID(childComplexity), true case "Competition.id": if e.complexity.Competition.ID == nil { @@ -1373,11 +1379,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.DNS.DNSToEnvironment(childComplexity), true case "DNS.hcl_id": - if e.complexity.DNS.HclID == nil { + if e.complexity.DNS.HCLID == nil { break } - return e.complexity.DNS.HclID(childComplexity), true + return e.complexity.DNS.HCLID(childComplexity), true case "DNS.id": if e.complexity.DNS.ID == nil { @@ -1422,11 +1428,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.DNSRecord.Disabled(childComplexity), true case "DNSRecord.hcl_id": - if e.complexity.DNSRecord.HclID == nil { + if e.complexity.DNSRecord.HCLID == nil { break } - return e.complexity.DNSRecord.HclID(childComplexity), true + return e.complexity.DNSRecord.HCLID(childComplexity), true case "DNSRecord.id": if e.complexity.DNSRecord.ID == nil { @@ -1639,11 +1645,11 @@ func (e *executableSchema) Complexity(typeName, field string, 
childComplexity in return e.complexity.Environment.ExposedVdiPorts(childComplexity), true case "Environment.hcl_id": - if e.complexity.Environment.HclID == nil { + if e.complexity.Environment.HCLID == nil { break } - return e.complexity.Environment.HclID(childComplexity), true + return e.complexity.Environment.HCLID(childComplexity), true case "Environment.id": if e.complexity.Environment.ID == nil { @@ -1688,11 +1694,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.FileDelete.FileDeleteToEnvironment(childComplexity), true case "FileDelete.hcl_id": - if e.complexity.FileDelete.HclID == nil { + if e.complexity.FileDelete.HCLID == nil { break } - return e.complexity.FileDelete.HclID(childComplexity), true + return e.complexity.FileDelete.HCLID(childComplexity), true case "FileDelete.id": if e.complexity.FileDelete.ID == nil { @@ -1744,11 +1750,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.FileDownload.FileDownloadToEnvironment(childComplexity), true case "FileDownload.hcl_id": - if e.complexity.FileDownload.HclID == nil { + if e.complexity.FileDownload.HCLID == nil { break } - return e.complexity.FileDownload.HclID(childComplexity), true + return e.complexity.FileDownload.HCLID(childComplexity), true case "FileDownload.id": if e.complexity.FileDownload.ID == nil { @@ -1814,11 +1820,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.FileExtract.FileExtractToEnvironment(childComplexity), true case "FileExtract.hcl_id": - if e.complexity.FileExtract.HclID == nil { + if e.complexity.FileExtract.HCLID == nil { break } - return e.complexity.FileExtract.HclID(childComplexity), true + return e.complexity.FileExtract.HCLID(childComplexity), true case "FileExtract.id": if e.complexity.FileExtract.ID == nil { @@ -1933,11 +1939,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Host.ExposedUDPPorts(childComplexity), true case "Host.hcl_id": - if e.complexity.Host.HclID == nil { + if e.complexity.Host.HCLID == nil { break } - return e.complexity.Host.HclID(childComplexity), true + return e.complexity.Host.HCLID(childComplexity), true case "Host.HostToDisk": if e.complexity.Host.HostToDisk == nil { @@ -2052,11 +2058,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Identity.FirstName(childComplexity), true case "Identity.hcl_id": - if e.complexity.Identity.HclID == nil { + if e.complexity.Identity.HCLID == nil { break } - return e.complexity.Identity.HclID(childComplexity), true + return e.complexity.Identity.HCLID(childComplexity), true case "Identity.id": if e.complexity.Identity.ID == nil { @@ -2369,11 +2375,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Network.Cidr(childComplexity), true case "Network.hcl_id": - if e.complexity.Network.HclID == nil { + if e.complexity.Network.HCLID == nil { break } - return e.complexity.Network.HclID(childComplexity), true + return e.complexity.Network.HCLID(childComplexity), true case "Network.id": if e.complexity.Network.ID == nil { @@ -3248,11 +3254,11 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Script.Disabled(childComplexity), true case "Script.hcl_id": - if e.complexity.Script.HclID == nil { + if e.complexity.Script.HCLID == nil { break } - return 
e.complexity.Script.HclID(childComplexity), true + return e.complexity.Script.HCLID(childComplexity), true case "Script.id": if e.complexity.Script.ID == nil { @@ -3664,25 +3670,40 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { rc := graphql.GetOperationContext(ctx) - ec := executionContext{rc, e} + ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)} inputUnmarshalMap := graphql.BuildUnmarshalerMap() first := true switch rc.Operation.Operation { case ast.Query: return func(ctx context.Context) *graphql.Response { - if !first { - return nil + var response graphql.Response + var data graphql.Marshaler + if first { + first = false + ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) + data = ec._Query(ctx, rc.Operation.SelectionSet) + } else { + if atomic.LoadInt32(&ec.pendingDeferred) > 0 { + result := <-ec.deferredResults + atomic.AddInt32(&ec.pendingDeferred, -1) + data = result.Result + response.Path = result.Path + response.Label = result.Label + response.Errors = result.Errors + } else { + return nil + } } - first = false - ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) - data := ec._Query(ctx, rc.Operation.SelectionSet) var buf bytes.Buffer data.MarshalGQL(&buf) - - return &graphql.Response{ - Data: buf.Bytes(), + response.Data = buf.Bytes() + if atomic.LoadInt32(&ec.deferred) > 0 { + hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0 + response.HasNext = &hasNext } + + return &response } case ast.Mutation: return func(ctx context.Context) *graphql.Response { @@ -3725,20 +3746,42 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { type executionContext struct { *graphql.OperationContext *executableSchema + deferred int32 + pendingDeferred int32 + deferredResults chan graphql.DeferredResult +} + +func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) { + atomic.AddInt32(&ec.pendingDeferred, 1) + go func() { + ctx := graphql.WithFreshResponseContext(dg.Context) + dg.FieldSet.Dispatch(ctx) + ds := graphql.DeferredResult{ + Path: dg.Path, + Label: dg.Label, + Result: dg.FieldSet, + Errors: graphql.GetErrors(ctx), + } + // null fields should bubble up + if dg.FieldSet.Invalids > 0 { + ds.Result = graphql.Null + } + ec.deferredResults <- ds + }() } func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapSchema(parsedSchema), nil + return introspection.WrapSchema(ec.Schema()), nil } func (ec *executionContext) introspectType(name string) (*introspection.Type, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil + return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil } var sources = []*ast.Source{ @@ -8268,7 +8311,7 @@ func (ec *executionContext) _Command_hcl_id(ctx context.Context, field graphql.C }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -8902,7 +8945,7 @@ func (ec *executionContext) _Competition_hcl_id(ctx context.Context, field graph }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, 
error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -9366,7 +9409,7 @@ func (ec *executionContext) _DNS_hcl_id(ctx context.Context, field graphql.Colle }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -9839,7 +9882,7 @@ func (ec *executionContext) _DNSRecord_hcl_id(ctx context.Context, field graphql }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -10471,7 +10514,7 @@ func (ec *executionContext) _Environment_hcl_id(ctx context.Context, field graph }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -11997,7 +12040,7 @@ func (ec *executionContext) _FileDelete_hcl_id(ctx context.Context, field graphq }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -12279,7 +12322,7 @@ func (ec *executionContext) _FileDownload_hcl_id(ctx context.Context, field grap }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -12869,7 +12912,7 @@ func (ec *executionContext) _FileExtract_hcl_id(ctx context.Context, field graph }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -13699,7 +13742,7 @@ func (ec *executionContext) _Host_hcl_id(ctx context.Context, field graphql.Coll }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -14518,7 +14561,7 @@ func (ec *executionContext) _Identity_hcl_id(ctx context.Context, field graphql. 
}() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -15227,7 +15270,7 @@ func (ec *executionContext) fieldContext_Mutation_loadEnvironment(ctx context.Co ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_loadEnvironment_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15333,7 +15376,7 @@ func (ec *executionContext) fieldContext_Mutation_createBuild(ctx context.Contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_createBuild_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15412,7 +15455,7 @@ func (ec *executionContext) fieldContext_Mutation_deleteUser(ctx context.Context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_deleteUser_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15518,7 +15561,7 @@ func (ec *executionContext) fieldContext_Mutation_executePlan(ctx context.Contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_executePlan_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15597,7 +15640,7 @@ func (ec *executionContext) fieldContext_Mutation_deleteBuild(ctx context.Contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_deleteBuild_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15676,7 +15719,7 @@ func (ec *executionContext) fieldContext_Mutation_createTask(ctx context.Context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_createTask_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15755,7 +15798,7 @@ func (ec *executionContext) fieldContext_Mutation_dumpBuild(ctx context.Context, ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_dumpBuild_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15834,7 +15877,7 @@ func (ec *executionContext) fieldContext_Mutation_rebuild(ctx context.Context, f ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_rebuild_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15913,7 +15956,7 @@ func (ec *executionContext) fieldContext_Mutation_approveCommit(ctx context.Cont ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_approveCommit_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -15992,7 +16035,7 @@ func (ec *executionContext) fieldContext_Mutation_cancelCommit(ctx context.Conte ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_cancelCommit_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16087,7 +16130,7 @@ func (ec *executionContext) fieldContext_Mutation_createAgentTasks(ctx context.C ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = 
ec.field_Mutation_createAgentTasks_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16182,7 +16225,7 @@ func (ec *executionContext) fieldContext_Mutation_createBatchAgentTasks(ctx cont ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_createBatchAgentTasks_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16317,7 +16360,7 @@ func (ec *executionContext) fieldContext_Mutation_createEnviromentFromRepo(ctx c ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_createEnviromentFromRepo_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16452,7 +16495,7 @@ func (ec *executionContext) fieldContext_Mutation_updateEnviromentViaPull(ctx co ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_updateEnviromentViaPull_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16531,7 +16574,7 @@ func (ec *executionContext) fieldContext_Mutation_cancelBuild(ctx context.Contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_cancelBuild_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16610,7 +16653,7 @@ func (ec *executionContext) fieldContext_Mutation_modifySelfPassword(ctx context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_modifySelfPassword_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16710,7 +16753,7 @@ func (ec *executionContext) fieldContext_Mutation_modifySelfUserInfo(ctx context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_modifySelfUserInfo_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16810,7 +16853,7 @@ func (ec *executionContext) fieldContext_Mutation_createUser(ctx context.Context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_createUser_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16910,7 +16953,7 @@ func (ec *executionContext) fieldContext_Mutation_modifyAdminUserInfo(ctx contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_modifyAdminUserInfo_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -16989,7 +17032,7 @@ func (ec *executionContext) fieldContext_Mutation_modifyAdminPassword(ctx contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Mutation_modifyAdminPassword_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -17126,7 +17169,7 @@ func (ec *executionContext) _Network_hcl_id(ctx context.Context, field graphql.C }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -21077,7 +21120,7 @@ func (ec *executionContext) fieldContext_Query_environment(ctx context.Context, ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = 
ec.field_Query_environment_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21171,7 +21214,7 @@ func (ec *executionContext) fieldContext_Query_provisionedHost(ctx context.Conte ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_provisionedHost_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21267,7 +21310,7 @@ func (ec *executionContext) fieldContext_Query_provisionedNetwork(ctx context.Co ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_provisionedNetwork_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21369,7 +21412,7 @@ func (ec *executionContext) fieldContext_Query_provisionedStep(ctx context.Conte ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_provisionedStep_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21473,7 +21516,7 @@ func (ec *executionContext) fieldContext_Query_plan(ctx context.Context, field g ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_plan_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21674,7 +21717,7 @@ func (ec *executionContext) fieldContext_Query_build(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_build_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21768,7 +21811,7 @@ func (ec *executionContext) fieldContext_Query_getBuildCommits(ctx context.Conte ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_getBuildCommits_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21862,7 +21905,7 @@ func (ec *executionContext) fieldContext_Query_getBuildCommit(ctx context.Contex ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_getBuildCommit_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -21956,7 +21999,7 @@ func (ec *executionContext) fieldContext_Query_status(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_status_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -22062,7 +22105,7 @@ func (ec *executionContext) fieldContext_Query_agentStatus(ctx context.Context, ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_agentStatus_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -22510,7 +22553,7 @@ func (ec *executionContext) fieldContext_Query_getAgentTasks(ctx context.Context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_getAgentTasks_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -22616,7 +22659,7 @@ func (ec *executionContext) fieldContext_Query_listAgentStatuses(ctx context.Con ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_listAgentStatuses_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return 
fc, err } return fc, nil } @@ -22710,7 +22753,7 @@ func (ec *executionContext) fieldContext_Query_listBuildStatuses(ctx context.Con ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_listBuildStatuses_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -22792,7 +22835,7 @@ func (ec *executionContext) fieldContext_Query_getAllAgentStatus(ctx context.Con ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_getAllAgentStatus_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -22874,7 +22917,7 @@ func (ec *executionContext) fieldContext_Query_getAllPlanStatus(ctx context.Cont ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_getAllPlanStatus_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -22981,7 +23024,7 @@ func (ec *executionContext) fieldContext_Query_getPlanStatusCounts(ctx context.C ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_getPlanStatusCounts_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -23060,7 +23103,7 @@ func (ec *executionContext) fieldContext_Query_viewServerTaskLogs(ctx context.Co ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_viewServerTaskLogs_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -23155,7 +23198,7 @@ func (ec *executionContext) fieldContext_Query_viewAgentTask(ctx context.Context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_viewAgentTask_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -23255,7 +23298,7 @@ func (ec *executionContext) fieldContext_Query_serverTasks(ctx context.Context, ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_serverTasks_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -23329,7 +23372,7 @@ func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -24141,7 +24184,7 @@ func (ec *executionContext) _Script_hcl_id(ctx context.Context, field graphql.Co }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HclID, nil + return obj.HCLID, nil }) if err != nil { ec.Error(ctx, err) @@ -26697,7 +26740,7 @@ func (ec *executionContext) fieldContext_Subscription_streamServerTaskLog(ctx co ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Subscription_streamServerTaskLog_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -28659,7 +28702,7 @@ func (ec *executionContext) fieldContext___Type_fields(ctx context.Context, fiel ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field___Type_fields_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -28847,7 +28890,7 @@ func 
(ec *executionContext) fieldContext___Type_enumValues(ctx context.Context, ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field___Type_enumValues_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -29375,109 +29418,94 @@ var agentStatusImplementors = []string{"AgentStatus"} func (ec *executionContext) _AgentStatus(ctx context.Context, sel ast.SelectionSet, obj *ent.AgentStatus) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, agentStatusImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("AgentStatus") case "clientId": - out.Values[i] = ec._AgentStatus_clientId(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "hostname": - out.Values[i] = ec._AgentStatus_hostname(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "upTime": - out.Values[i] = ec._AgentStatus_upTime(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "bootTime": - out.Values[i] = ec._AgentStatus_bootTime(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "numProcs": - out.Values[i] = ec._AgentStatus_numProcs(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "OS": - out.Values[i] = ec._AgentStatus_OS(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "hostID": - out.Values[i] = ec._AgentStatus_hostID(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "load1": - out.Values[i] = ec._AgentStatus_load1(ctx, field, obj) - case "load5": - out.Values[i] = ec._AgentStatus_load5(ctx, field, obj) - case "load15": - out.Values[i] = ec._AgentStatus_load15(ctx, field, obj) - case "totalMem": - out.Values[i] = ec._AgentStatus_totalMem(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "freeMem": - out.Values[i] = ec._AgentStatus_freeMem(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "usedMem": - out.Values[i] = ec._AgentStatus_usedMem(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "timestamp": - out.Values[i] = ec._AgentStatus_timestamp(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -29485,34 +29513,43 @@ var agentStatusBatchImplementors = []string{"AgentStatusBatch"} func (ec *executionContext) _AgentStatusBatch(ctx context.Context, sel ast.SelectionSet, obj *model.AgentStatusBatch) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, agentStatusBatchImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = 
graphql.MarshalString("AgentStatusBatch") case "agentStatuses": - out.Values[i] = ec._AgentStatusBatch_agentStatuses(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "pageInfo": - out.Values[i] = ec._AgentStatusBatch_pageInfo(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -29520,8 +29557,9 @@ var agentTaskImplementors = []string{"AgentTask"} func (ec *executionContext) _AgentTask(ctx context.Context, sel ast.SelectionSet, obj *ent.AgentTask) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, agentTaskImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -29529,7 +29567,7 @@ func (ec *executionContext) _AgentTask(ctx context.Context, sel ast.SelectionSet case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29537,23 +29575,37 @@ func (ec *executionContext) _AgentTask(ctx context.Context, sel ast.SelectionSet }() res = ec._AgentTask_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "args": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "args": out.Values[i] = ec._AgentTask_args(ctx, field, obj) - case "command": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29561,30 +29613,42 @@ func (ec *executionContext) _AgentTask(ctx context.Context, sel ast.SelectionSet }() res = ec._AgentTask_command(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case 
"number": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "number": out.Values[i] = ec._AgentTask_number(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "output": - out.Values[i] = ec._AgentTask_output(ctx, field, obj) - case "state": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29592,27 +29656,53 @@ func (ec *executionContext) _AgentTask(ctx context.Context, sel ast.SelectionSet }() res = ec._AgentTask_state(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "error_message": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "error_message": out.Values[i] = ec._AgentTask_error_message(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -29620,8 +29710,9 @@ var authUserImplementors = []string{"AuthUser"} func (ec *executionContext) _AuthUser(ctx context.Context, sel ast.SelectionSet, obj *ent.AuthUser) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, authUserImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -29629,7 +29720,7 @@ func (ec *executionContext) _AuthUser(ctx context.Context, sel ast.SelectionSet, case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29637,26 +29728,40 @@ func (ec *executionContext) _AuthUser(ctx context.Context, sel ast.SelectionSet, }() res = ec._AuthUser_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + 
} + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "username": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "username": out.Values[i] = ec._AuthUser_username(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "role": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29664,19 +29769,35 @@ func (ec *executionContext) _AuthUser(ctx context.Context, sel ast.SelectionSet, }() res = ec._AuthUser_role(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "provider": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29684,61 +29805,65 @@ func (ec *executionContext) _AuthUser(ctx context.Context, sel ast.SelectionSet, }() res = ec._AuthUser_provider(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "first_name": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "first_name": out.Values[i] = ec._AuthUser_first_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "last_name": - out.Values[i] = ec._AuthUser_last_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "email": - out.Values[i] = ec._AuthUser_email(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "phone": - out.Values[i] = ec._AuthUser_phone(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + 
atomic.AddUint32(&out.Invalids, 1) } case "company": - out.Values[i] = ec._AuthUser_company(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "occupation": - out.Values[i] = ec._AuthUser_occupation(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "publicKey": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29746,23 +29871,51 @@ func (ec *executionContext) _AuthUser(ctx context.Context, sel ast.SelectionSet, }() res = ec._AuthUser_publicKey(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -29770,8 +29923,9 @@ var buildImplementors = []string{"Build"} func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, obj *ent.Build) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, buildImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -29779,7 +29933,7 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29787,40 +29941,50 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "revision": + // don't run the out.Concurrently() 
call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "revision": out.Values[i] = ec._Build_revision(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "environment_revision": - out.Values[i] = ec._Build_environment_revision(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "completed_plan": - out.Values[i] = ec._Build_completed_plan(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "buildToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29828,19 +29992,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_buildToStatus(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "buildToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29848,19 +30028,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_buildToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "buildToCompetition": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29868,19 +30064,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_buildToCompetition(ctx, field, obj) if res == 
graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "buildToProvisionedNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29888,19 +30100,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_buildToProvisionedNetwork(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "buildToTeam": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29908,19 +30136,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_buildToTeam(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "buildToPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29928,19 +30172,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_buildToPlan(ctx, field, obj) if res == 
graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "BuildToLatestBuildCommit": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29950,14 +30210,30 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "BuildToBuildCommits": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29965,19 +30241,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_BuildToBuildCommits(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "BuildToRepoCommit": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -29985,19 +30277,35 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_BuildToRepoCommit(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() 
graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "BuildToServerTasks": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30005,23 +30313,51 @@ func (ec *executionContext) _Build(ctx context.Context, sel ast.SelectionSet, ob }() res = ec._Build_BuildToServerTasks(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30029,8 +30365,9 @@ var buildCommitImplementors = []string{"BuildCommit"} func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionSet, obj *ent.BuildCommit) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, buildCommitImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -30038,7 +30375,7 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30046,19 +30383,35 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS }() res = ec._BuildCommit_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = 
graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "type": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30066,26 +30419,40 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS }() res = ec._BuildCommit_type(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "revision": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "revision": out.Values[i] = ec._BuildCommit_revision(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "state": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30093,26 +30460,40 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS }() res = ec._BuildCommit_state(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "createdAt": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "createdAt": out.Values[i] = ec._BuildCommit_createdAt(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "BuildCommitToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30120,19 +30501,35 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS }() res = 
ec._BuildCommit_BuildCommitToBuild(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "BuildCommitToPlanDiffs": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30140,19 +30537,35 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS }() res = ec._BuildCommit_BuildCommitToPlanDiffs(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "BuildCommitToServerTask": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30160,23 +30573,51 @@ func (ec *executionContext) _BuildCommit(ctx context.Context, sel ast.SelectionS }() res = ec._BuildCommit_BuildCommitToServerTask(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: 
graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30184,8 +30625,9 @@ var commandImplementors = []string{"Command"} func (ec *executionContext) _Command(ctx context.Context, sel ast.SelectionSet, obj *ent.Command) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, commandImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -30193,7 +30635,7 @@ func (ec *executionContext) _Command(ctx context.Context, sel ast.SelectionSet, case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30201,82 +30643,80 @@ func (ec *executionContext) _Command(ctx context.Context, sel ast.SelectionSet, }() res = ec._Command_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._Command_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "name": - out.Values[i] = ec._Command_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "description": - out.Values[i] = ec._Command_description(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "program": - out.Values[i] = ec._Command_program(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "args": - out.Values[i] = ec._Command_args(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "ignoreErrors": - out.Values[i] = ec._Command_ignoreErrors(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "disabled": - out.Values[i] = ec._Command_disabled(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "cooldown": - out.Values[i] = ec._Command_cooldown(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "timeout": - out.Values[i] = ec._Command_timeout(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vars": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx 
context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30286,14 +30726,30 @@ func (ec *executionContext) _Command(ctx context.Context, sel ast.SelectionSet, return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30303,14 +30759,30 @@ func (ec *executionContext) _Command(ctx context.Context, sel ast.SelectionSet, return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "CommandToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30318,23 +30790,51 @@ func (ec *executionContext) _Command(ctx context.Context, sel ast.SelectionSet, }() res = ec._Command_CommandToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30342,8 +30842,9 @@ 
var competitionImplementors = []string{"Competition"} func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionSet, obj *ent.Competition) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, competitionImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -30351,7 +30852,7 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30359,33 +30860,45 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS }() res = ec._Competition_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "hcl_id": - out.Values[i] = ec._Competition_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "root_password": - out.Values[i] = ec._Competition_root_password(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "config": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30395,14 +30908,30 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30412,14 +30941,30 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS return res } - out.Concurrently(i, func() graphql.Marshaler { - return 
innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "competitionToDNS": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30427,19 +30972,35 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS }() res = ec._Competition_competitionToDNS(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "CompetitionToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30447,19 +31008,35 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS }() res = ec._Competition_CompetitionToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "CompetitionToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30467,23 +31044,51 @@ func (ec *executionContext) _Competition(ctx context.Context, sel ast.SelectionS }() res = ec._Competition_CompetitionToBuild(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, 
func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30491,8 +31096,9 @@ var dNSImplementors = []string{"DNS"} func (ec *executionContext) _DNS(ctx context.Context, sel ast.SelectionSet, obj *ent.DNS) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, dNSImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -30500,7 +31106,7 @@ func (ec *executionContext) _DNS(ctx context.Context, sel ast.SelectionSet, obj case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30508,54 +31114,60 @@ func (ec *executionContext) _DNS(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._DNS_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._DNS_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "type": - out.Values[i] = ec._DNS_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "root_domain": - out.Values[i] = ec._DNS_root_domain(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "dns_servers": - out.Values[i] = ec._DNS_dns_servers(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "ntp_servers": - out.Values[i] = ec._DNS_ntp_servers(ctx, field, obj) - if out.Values[i] == graphql.Null { - 
atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "config": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30565,14 +31177,30 @@ func (ec *executionContext) _DNS(ctx context.Context, sel ast.SelectionSet, obj return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "DNSToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30580,19 +31208,35 @@ func (ec *executionContext) _DNS(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._DNS_DNSToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "DNSToCompetition": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30600,23 +31244,51 @@ func (ec *executionContext) _DNS(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._DNS_DNSToCompetition(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 
{ + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30624,8 +31296,9 @@ var dNSRecordImplementors = []string{"DNSRecord"} func (ec *executionContext) _DNSRecord(ctx context.Context, sel ast.SelectionSet, obj *ent.DNSRecord) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, dNSRecordImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -30633,7 +31306,7 @@ func (ec *executionContext) _DNSRecord(ctx context.Context, sel ast.SelectionSet case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30641,54 +31314,60 @@ func (ec *executionContext) _DNSRecord(ctx context.Context, sel ast.SelectionSet }() res = ec._DNSRecord_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._DNSRecord_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "name": - out.Values[i] = ec._DNSRecord_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "values": - out.Values[i] = ec._DNSRecord_values(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "type": - out.Values[i] = ec._DNSRecord_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "zone": - out.Values[i] = ec._DNSRecord_zone(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vars": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30696,26 +31375,40 @@ func (ec *executionContext) _DNSRecord(ctx context.Context, sel ast.SelectionSet }() res = ec._DNSRecord_vars(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if 
field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "disabled": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "disabled": out.Values[i] = ec._DNSRecord_disabled(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30723,19 +31416,35 @@ func (ec *executionContext) _DNSRecord(ctx context.Context, sel ast.SelectionSet }() res = ec._DNSRecord_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "DNSRecordToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30743,23 +31452,51 @@ func (ec *executionContext) _DNSRecord(ctx context.Context, sel ast.SelectionSet }() res = ec._DNSRecord_DNSRecordToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30767,23 
+31504,22 @@ var diskImplementors = []string{"Disk"} func (ec *executionContext) _Disk(ctx context.Context, sel ast.SelectionSet, obj *ent.Disk) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, diskImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Disk") case "size": - out.Values[i] = ec._Disk_size(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "DiskToHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30791,23 +31527,51 @@ func (ec *executionContext) _Disk(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Disk_DiskToHost(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -30815,8 +31579,9 @@ var environmentImplementors = []string{"Environment"} func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionSet, obj *ent.Environment) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, environmentImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -30824,7 +31589,7 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30832,82 +31597,80 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = 
graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._Environment_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "competition_id": - out.Values[i] = ec._Environment_competition_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "name": - out.Values[i] = ec._Environment_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "description": - out.Values[i] = ec._Environment_description(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "builder": - out.Values[i] = ec._Environment_builder(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "team_count": - out.Values[i] = ec._Environment_team_count(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "revision": - out.Values[i] = ec._Environment_revision(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "admin_cidrs": - out.Values[i] = ec._Environment_admin_cidrs(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "exposed_vdi_ports": - out.Values[i] = ec._Environment_exposed_vdi_ports(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "config": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30917,14 +31680,30 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30934,14 +31713,30 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS return res } - out.Concurrently(i, 
func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToUser": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30949,19 +31744,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToUser(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30969,19 +31780,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToHost(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToCompetition": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -30989,19 +31816,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToCompetition(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } 
return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToIdentity": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31009,19 +31852,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToIdentity(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToCommand": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31029,19 +31888,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToCommand(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToScript": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31049,19 +31924,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToScript(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 
1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToFileDownload": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31069,19 +31960,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToFileDownload(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToFileDelete": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31089,19 +31996,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToFileDelete(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToFileExtract": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31109,19 +32032,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToFileExtract(ctx, 
field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToDNSRecord": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31129,19 +32068,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToDNSRecord(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToDNS": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31149,19 +32104,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToDNS(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31169,19 +32140,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = 
ec._Environment_EnvironmentToNetwork(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31189,19 +32176,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToBuild(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToRepository": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31209,19 +32212,35 @@ func (ec *executionContext) _Environment(ctx context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToRepository(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "EnvironmentToServerTask": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31229,23 +32248,51 @@ func (ec *executionContext) _Environment(ctx 
context.Context, sel ast.SelectionS }() res = ec._Environment_EnvironmentToServerTask(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -31253,8 +32300,9 @@ var fileDeleteImplementors = []string{"FileDelete"} func (ec *executionContext) _FileDelete(ctx context.Context, sel ast.SelectionSet, obj *ent.FileDelete) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, fileDeleteImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -31262,7 +32310,7 @@ func (ec *executionContext) _FileDelete(ctx context.Context, sel ast.SelectionSe case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31270,33 +32318,45 @@ func (ec *executionContext) _FileDelete(ctx context.Context, sel ast.SelectionSe }() res = ec._FileDelete_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._FileDelete_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "path": - out.Values[i] = ec._FileDelete_path(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer 
func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31304,19 +32364,35 @@ func (ec *executionContext) _FileDelete(ctx context.Context, sel ast.SelectionSe }() res = ec._FileDelete_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "FileDeleteToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31324,23 +32400,51 @@ func (ec *executionContext) _FileDelete(ctx context.Context, sel ast.SelectionSe }() res = ec._FileDelete_FileDeleteToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -31348,8 +32452,9 @@ var fileDownloadImplementors = []string{"FileDownload"} func (ec *executionContext) _FileDownload(ctx context.Context, sel ast.SelectionSet, obj *ent.FileDownload) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, fileDownloadImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -31357,7 +32462,7 @@ func (ec *executionContext) _FileDownload(ctx context.Context, sel ast.Selection case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31365,82 +32470,80 @@ func (ec *executionContext) _FileDownload(ctx context.Context, sel ast.Selection }() res = 
ec._FileDownload_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._FileDownload_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "sourceType": - out.Values[i] = ec._FileDownload_sourceType(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "source": - out.Values[i] = ec._FileDownload_source(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "destination": - out.Values[i] = ec._FileDownload_destination(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "template": - out.Values[i] = ec._FileDownload_template(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "perms": - out.Values[i] = ec._FileDownload_perms(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "disabled": - out.Values[i] = ec._FileDownload_disabled(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "md5": - out.Values[i] = ec._FileDownload_md5(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "absPath": - out.Values[i] = ec._FileDownload_absPath(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31448,19 +32551,35 @@ func (ec *executionContext) _FileDownload(ctx context.Context, sel ast.Selection }() res = ec._FileDownload_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + 
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "FileDownloadToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31468,23 +32587,51 @@ func (ec *executionContext) _FileDownload(ctx context.Context, sel ast.Selection }() res = ec._FileDownload_FileDownloadToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -31492,8 +32639,9 @@ var fileExtractImplementors = []string{"FileExtract"} func (ec *executionContext) _FileExtract(ctx context.Context, sel ast.SelectionSet, obj *ent.FileExtract) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, fileExtractImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -31501,7 +32649,7 @@ func (ec *executionContext) _FileExtract(ctx context.Context, sel ast.SelectionS case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31509,47 +32657,55 @@ func (ec *executionContext) _FileExtract(ctx context.Context, sel ast.SelectionS }() res = ec._FileExtract_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._FileExtract_hcl_id(ctx, field, obj) - if 
out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "source": - out.Values[i] = ec._FileExtract_source(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "destination": - out.Values[i] = ec._FileExtract_destination(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "type": - out.Values[i] = ec._FileExtract_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31557,19 +32713,35 @@ func (ec *executionContext) _FileExtract(ctx context.Context, sel ast.SelectionS }() res = ec._FileExtract_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "FileExtractToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31577,23 +32749,51 @@ func (ec *executionContext) _FileExtract(ctx context.Context, sel ast.SelectionS }() res = ec._FileExtract_FileExtractToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -31601,30 +32801,27 @@ var findingImplementors = []string{"Finding"} func (ec *executionContext) 
_Finding(ctx context.Context, sel ast.SelectionSet, obj *ent.Finding) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, findingImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Finding") case "name": - out.Values[i] = ec._Finding_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "description": - out.Values[i] = ec._Finding_description(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "severity": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31632,19 +32829,35 @@ func (ec *executionContext) _Finding(ctx context.Context, sel ast.SelectionSet, }() res = ec._Finding_severity(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "difficulty": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31652,19 +32865,35 @@ func (ec *executionContext) _Finding(ctx context.Context, sel ast.SelectionSet, }() res = ec._Finding_difficulty(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31672,19 +32901,35 @@ func (ec *executionContext) _Finding(ctx context.Context, sel ast.SelectionSet, }() res = ec._Finding_tags(ctx, field, obj) if res == graphql.Null { - 
atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "FindingToUser": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31692,19 +32937,35 @@ func (ec *executionContext) _Finding(ctx context.Context, sel ast.SelectionSet, }() res = ec._Finding_FindingToUser(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "FindingToScript": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31712,19 +32973,35 @@ func (ec *executionContext) _Finding(ctx context.Context, sel ast.SelectionSet, }() res = ec._Finding_FindingToScript(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "FindingToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31732,23 +33009,51 @@ func (ec *executionContext) _Finding(ctx context.Context, sel ast.SelectionSet, }() res = ec._Finding_FindingToEnvironment(ctx, field, obj) if res == graphql.Null { - 
atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -31756,8 +33061,9 @@ var hostImplementors = []string{"Host"} func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj *ent.Host) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, hostImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -31765,7 +33071,7 @@ func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31773,89 +33079,85 @@ func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Host_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._Host_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "hostname": - out.Values[i] = ec._Host_hostname(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "description": - out.Values[i] = ec._Host_description(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "OS": - out.Values[i] = ec._Host_OS(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case 
"last_octet": - out.Values[i] = ec._Host_last_octet(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "instance_size": - out.Values[i] = ec._Host_instance_size(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "allow_mac_changes": - out.Values[i] = ec._Host_allow_mac_changes(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "exposed_tcp_ports": - out.Values[i] = ec._Host_exposed_tcp_ports(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "exposed_udp_ports": - out.Values[i] = ec._Host_exposed_udp_ports(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "override_password": - out.Values[i] = ec._Host_override_password(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vars": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31865,28 +33167,40 @@ func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "user_groups": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "user_groups": out.Values[i] = ec._Host_user_groups(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "provision_steps": - out.Values[i] = ec._Host_provision_steps(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31894,19 +33208,35 @@ func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Host_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run 
the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "HostToDisk": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31914,19 +33244,35 @@ func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Host_HostToDisk(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "HostToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31934,23 +33280,51 @@ func (ec *executionContext) _Host(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Host_HostToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -31958,8 +33332,9 @@ var identityImplementors = []string{"Identity"} func (ec *executionContext) _Identity(ctx context.Context, sel ast.SelectionSet, obj *ent.Identity) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, identityImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -31967,7 +33342,7 @@ func (ec *executionContext) _Identity(ctx context.Context, sel ast.SelectionSet, case "id": field := field - innerFunc := 
func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -31975,68 +33350,70 @@ func (ec *executionContext) _Identity(ctx context.Context, sel ast.SelectionSet, }() res = ec._Identity_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._Identity_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "first_name": - out.Values[i] = ec._Identity_first_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "last_name": - out.Values[i] = ec._Identity_last_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "email": - out.Values[i] = ec._Identity_email(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "password": - out.Values[i] = ec._Identity_password(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "description": - out.Values[i] = ec._Identity_description(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "avatar_file": - out.Values[i] = ec._Identity_avatar_file(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vars": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32044,19 +33421,35 @@ func (ec *executionContext) _Identity(ctx context.Context, sel ast.SelectionSet, }() res = ec._Identity_vars(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { 
return innerFunc(ctx, out) }) case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32064,19 +33457,35 @@ func (ec *executionContext) _Identity(ctx context.Context, sel ast.SelectionSet, }() res = ec._Identity_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "IdentityToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32084,23 +33493,51 @@ func (ec *executionContext) _Identity(ctx context.Context, sel ast.SelectionSet, }() res = ec._Identity_IdentityToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32108,34 +33545,43 @@ var laForgePageInfoImplementors = []string{"LaForgePageInfo"} func (ec *executionContext) _LaForgePageInfo(ctx context.Context, sel ast.SelectionSet, obj *model.LaForgePageInfo) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, laForgePageInfoImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("LaForgePageInfo") case "total": - out.Values[i] = ec._LaForgePageInfo_total(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "nextOffset": - out.Values[i] = 
ec._LaForgePageInfo_nextOffset(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32148,7 +33594,7 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) }) out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ Object: field.Name, @@ -32159,184 +33605,154 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) case "__typename": out.Values[i] = graphql.MarshalString("Mutation") case "loadEnvironment": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_loadEnvironment(ctx, field) }) - case "createBuild": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_createBuild(ctx, field) }) - case "deleteUser": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_deleteUser(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "executePlan": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_executePlan(ctx, field) }) - case "deleteBuild": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_deleteBuild(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "createTask": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_createTask(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "dumpBuild": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_dumpBuild(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "rebuild": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_rebuild(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "approveCommit": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_approveCommit(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "cancelCommit": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_cancelCommit(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "createAgentTasks": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx 
context.Context) (res graphql.Marshaler) { return ec._Mutation_createAgentTasks(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "createBatchAgentTasks": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_createBatchAgentTasks(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "createEnviromentFromRepo": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_createEnviromentFromRepo(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "updateEnviromentViaPull": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_updateEnviromentViaPull(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "cancelBuild": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_cancelBuild(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "modifySelfPassword": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_modifySelfPassword(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "modifySelfUserInfo": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_modifySelfUserInfo(ctx, field) }) - case "createUser": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_createUser(ctx, field) }) - case "modifyAdminUserInfo": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_modifyAdminUserInfo(ctx, field) }) - case "modifyAdminPassword": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_modifyAdminPassword(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "nukeBackend": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_nukeBackend(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32344,8 +33760,9 @@ var networkImplementors = []string{"Network"} func (ec *executionContext) _Network(ctx context.Context, sel ast.SelectionSet, obj *ent.Network) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, networkImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -32353,7 
+33770,7 @@ func (ec *executionContext) _Network(ctx context.Context, sel ast.SelectionSet, case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32361,47 +33778,55 @@ func (ec *executionContext) _Network(ctx context.Context, sel ast.SelectionSet, }() res = ec._Network_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._Network_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "name": - out.Values[i] = ec._Network_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "cidr": - out.Values[i] = ec._Network_cidr(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vdi_visible": - out.Values[i] = ec._Network_vdi_visible(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vars": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32411,14 +33836,30 @@ func (ec *executionContext) _Network(ctx context.Context, sel ast.SelectionSet, return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32426,19 +33867,35 @@ func (ec *executionContext) _Network(ctx context.Context, sel ast.SelectionSet, }() res = ec._Network_tags(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() 
graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "NetworkToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32446,23 +33903,51 @@ func (ec *executionContext) _Network(ctx context.Context, sel ast.SelectionSet, }() res = ec._Network_NetworkToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32470,8 +33955,9 @@ var planImplementors = []string{"Plan"} func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj *ent.Plan) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, planImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -32479,7 +33965,7 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32487,26 +33973,40 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = 
graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "step_number": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "step_number": out.Values[i] = ec._Plan_step_number(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "type": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32514,26 +34014,40 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_type(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "build_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "build_id": out.Values[i] = ec._Plan_build_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "NextPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32541,19 +34055,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_NextPlan(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PrevPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32561,19 +34091,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PrevPlan(ctx, field, obj) if res 
== graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32581,19 +34127,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToBuild(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToTeam": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32601,19 +34163,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToTeam(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToProvisionedNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32621,19 +34199,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToProvisionedNetwork(ctx, field, obj) if res == 
graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToProvisionedHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32641,19 +34235,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToProvisionedHost(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToProvisioningStep": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32661,19 +34271,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToProvisioningStep(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32681,19 +34307,35 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToStatus(ctx, field, obj) 
if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanToPlanDiffs": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32701,23 +34343,51 @@ func (ec *executionContext) _Plan(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Plan_PlanToPlanDiffs(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32725,111 +34395,98 @@ var planCountsImplementors = []string{"PlanCounts"} func (ec *executionContext) _PlanCounts(ctx context.Context, sel ast.SelectionSet, obj *model.PlanCounts) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, planCountsImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("PlanCounts") case "planning": - out.Values[i] = ec._PlanCounts_planning(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "awaiting": - out.Values[i] = ec._PlanCounts_awaiting(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "parentAwaiting": - out.Values[i] = ec._PlanCounts_parentAwaiting(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "inProgress": - out.Values[i] = ec._PlanCounts_inProgress(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "failed": - out.Values[i] = ec._PlanCounts_failed(ctx, field, obj) - if out.Values[i] == 
graphql.Null { - invalids++ + out.Invalids++ } case "complete": - out.Values[i] = ec._PlanCounts_complete(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "tainted": - out.Values[i] = ec._PlanCounts_tainted(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "undefined": - out.Values[i] = ec._PlanCounts_undefined(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "toDelete": - out.Values[i] = ec._PlanCounts_toDelete(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "deleteInProgress": - out.Values[i] = ec._PlanCounts_deleteInProgress(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "deleted": - out.Values[i] = ec._PlanCounts_deleted(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "toRebuild": - out.Values[i] = ec._PlanCounts_toRebuild(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "cancelled": - out.Values[i] = ec._PlanCounts_cancelled(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32837,8 +34494,9 @@ var planDiffImplementors = []string{"PlanDiff"} func (ec *executionContext) _PlanDiff(ctx context.Context, sel ast.SelectionSet, obj *ent.PlanDiff) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, planDiffImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -32846,7 +34504,7 @@ func (ec *executionContext) _PlanDiff(ctx context.Context, sel ast.SelectionSet, case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32854,26 +34512,40 @@ func (ec *executionContext) _PlanDiff(ctx context.Context, sel ast.SelectionSet, }() res = ec._PlanDiff_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "revision": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "revision": out.Values[i] = ec._PlanDiff_revision(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } 
case "new_state": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32881,19 +34553,35 @@ func (ec *executionContext) _PlanDiff(ctx context.Context, sel ast.SelectionSet, }() res = ec._PlanDiff_new_state(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanDiffToBuildCommit": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32901,19 +34589,35 @@ func (ec *executionContext) _PlanDiff(ctx context.Context, sel ast.SelectionSet, }() res = ec._PlanDiff_PlanDiffToBuildCommit(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "PlanDiffToPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32921,23 +34625,51 @@ func (ec *executionContext) _PlanDiff(ctx context.Context, sel ast.SelectionSet, }() res = ec._PlanDiff_PlanDiffToPlan(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) 
default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -32945,8 +34677,9 @@ var provisionedHostImplementors = []string{"ProvisionedHost"} func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.SelectionSet, obj *ent.ProvisionedHost) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, provisionedHostImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -32954,7 +34687,7 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32962,26 +34695,40 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select }() res = ec._ProvisionedHost_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "subnet_ip": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "subnet_ip": out.Values[i] = ec._ProvisionedHost_subnet_ip(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "ProvisionedHostToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -32989,19 +34736,35 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select }() res = ec._ProvisionedHost_ProvisionedHostToStatus(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) 
graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedHostToProvisionedNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33009,19 +34772,35 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select }() res = ec._ProvisionedHost_ProvisionedHostToProvisionedNetwork(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedHostToHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33029,19 +34808,35 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select }() res = ec._ProvisionedHost_ProvisionedHostToHost(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedHostToProvisioningStep": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33049,19 +34844,35 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select }() res = ec._ProvisionedHost_ProvisionedHostToProvisioningStep(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the 
out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedHostToAgentStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33071,14 +34882,30 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedHostToPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33086,23 +34913,51 @@ func (ec *executionContext) _ProvisionedHost(ctx context.Context, sel ast.Select }() res = ec._ProvisionedHost_ProvisionedHostToPlan(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -33110,8 +34965,9 @@ var provisionedNetworkImplementors = []string{"ProvisionedNetwork"} func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.SelectionSet, obj *ent.ProvisionedNetwork) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, provisionedNetworkImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -33119,7 +34975,7 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := 
func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33127,33 +34983,45 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "name": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "name": out.Values[i] = ec._ProvisionedNetwork_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "cidr": - out.Values[i] = ec._ProvisionedNetwork_cidr(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "ProvisionedNetworkToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33161,19 +35029,35 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_ProvisionedNetworkToStatus(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedNetworkToNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33181,19 +35065,35 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_ProvisionedNetworkToNetwork(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + 
deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedNetworkToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33201,19 +35101,35 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_ProvisionedNetworkToBuild(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedNetworkToTeam": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33221,19 +35137,35 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_ProvisionedNetworkToTeam(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedNetworkToProvisionedHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33241,19 +35173,35 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_ProvisionedNetworkToProvisionedHost(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + 
dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisionedNetworkToPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33261,23 +35209,51 @@ func (ec *executionContext) _ProvisionedNetwork(ctx context.Context, sel ast.Sel }() res = ec._ProvisionedNetwork_ProvisionedNetworkToPlan(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -33285,8 +35261,9 @@ var provisioningStepImplementors = []string{"ProvisioningStep"} func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.SelectionSet, obj *ent.ProvisioningStep) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, provisioningStepImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -33294,7 +35271,7 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33302,19 +35279,35 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec }() res = ec._ProvisioningStep_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } 
+ dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "type": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33322,26 +35315,40 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec }() res = ec._ProvisioningStep_type(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "step_number": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "step_number": out.Values[i] = ec._ProvisioningStep_step_number(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "ProvisioningStepToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33349,19 +35356,35 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec }() res = ec._ProvisioningStep_ProvisioningStepToStatus(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToProvisionedHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33369,19 +35392,35 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec }() res = ec._ProvisioningStep_ProvisioningStepToProvisionedHost(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return 
innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToScript": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33391,14 +35430,30 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToCommand": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33408,14 +35463,30 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToDNSRecord": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33425,14 +35496,30 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() 
call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToFileDelete": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33442,14 +35529,30 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToFileDownload": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33459,14 +35562,30 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToFileExtract": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33476,14 +35595,30 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ProvisioningStepToPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { 
ec.Error(ctx, ec.Recover(ctx, r)) @@ -33493,18 +35628,46 @@ func (ec *executionContext) _ProvisioningStep(ctx context.Context, sel ast.Selec return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -33517,7 +35680,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }) out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ Object: field.Name, @@ -33530,7 +35693,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr case "environments": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33541,16 +35704,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "environment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33561,16 +35723,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "provisionedHost": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33581,16 
+35742,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "provisionedNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33601,16 +35761,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "provisionedStep": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33621,16 +35780,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "plan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33641,16 +35799,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getBuilds": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33661,16 +35818,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return 
innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "build": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33681,16 +35837,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getBuildCommits": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33701,16 +35856,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getBuildCommit": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33721,16 +35875,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "status": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33741,16 +35894,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "agentStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx 
context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33761,16 +35913,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getServerTasks": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33781,16 +35932,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "currentUser": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33801,16 +35951,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getUserList": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33821,16 +35970,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getCurrentUserTasks": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33841,16 +35989,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return 
ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getAgentTasks": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33861,16 +36008,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "listAgentStatuses": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33881,16 +36027,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "listBuildStatuses": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33901,16 +36046,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getAllAgentStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33921,16 +36065,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx 
context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getAllPlanStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33941,16 +36084,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "getPlanStatusCounts": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33958,22 +36100,21 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_getPlanStatusCounts(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "viewServerTaskLogs": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -33981,22 +36122,21 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_viewServerTaskLogs(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "viewAgentTask": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34004,22 +36144,21 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_viewAgentTask(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx 
context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "serverTasks": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34030,32 +36169,39 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "__type": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Query___type(ctx, field) }) - case "__schema": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Query___schema(ctx, field) }) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34063,8 +36209,9 @@ var repoCommitImplementors = []string{"RepoCommit"} func (ec *executionContext) _RepoCommit(ctx context.Context, sel ast.SelectionSet, obj *ent.RepoCommit) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, repoCommitImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -34072,7 +36219,7 @@ func (ec *executionContext) _RepoCommit(ctx context.Context, sel ast.SelectionSe case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34080,33 +36227,45 @@ func (ec *executionContext) _RepoCommit(ctx context.Context, sel ast.SelectionSe }() res = ec._RepoCommit_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "revision": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return 
innerFunc(ctx, out) }) + case "revision": out.Values[i] = ec._RepoCommit_revision(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "hash": - out.Values[i] = ec._RepoCommit_hash(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "author": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34114,19 +36273,35 @@ func (ec *executionContext) _RepoCommit(ctx context.Context, sel ast.SelectionSe }() res = ec._RepoCommit_author(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "committer": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34134,47 +36309,55 @@ func (ec *executionContext) _RepoCommit(ctx context.Context, sel ast.SelectionSe }() res = ec._RepoCommit_committer(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "pgp_signature": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "pgp_signature": out.Values[i] = ec._RepoCommit_pgp_signature(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "message": - out.Values[i] = ec._RepoCommit_message(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tree_hash": - out.Values[i] = ec._RepoCommit_tree_hash(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "parent_hashes": - out.Values[i] = ec._RepoCommit_parent_hashes(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "RepoCommitToRepository": field := 
field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34182,23 +36365,51 @@ func (ec *executionContext) _RepoCommit(ctx context.Context, sel ast.SelectionSe }() res = ec._RepoCommit_RepoCommitToRepository(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34206,8 +36417,9 @@ var repositoryImplementors = []string{"Repository"} func (ec *executionContext) _Repository(ctx context.Context, sel ast.SelectionSet, obj *ent.Repository) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, repositoryImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -34215,7 +36427,7 @@ func (ec *executionContext) _Repository(ctx context.Context, sel ast.SelectionSe case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34223,33 +36435,45 @@ func (ec *executionContext) _Repository(ctx context.Context, sel ast.SelectionSe }() res = ec._Repository_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "repo_url": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "repo_url": out.Values[i] = ec._Repository_repo_url(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "branch_name": - out.Values[i] = 
ec._Repository_branch_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "environment_filepath": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34257,19 +36481,35 @@ func (ec *executionContext) _Repository(ctx context.Context, sel ast.SelectionSe }() res = ec._Repository_environment_filepath(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "RepositoryToRepoCommit": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34277,23 +36517,51 @@ func (ec *executionContext) _Repository(ctx context.Context, sel ast.SelectionSe }() res = ec._Repository_RepositoryToRepoCommit(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34301,8 +36569,9 @@ var scriptImplementors = []string{"Script"} func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, obj *ent.Script) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, scriptImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -34310,7 +36579,7 @@ func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, o case "id": field := field - innerFunc := 
func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34318,96 +36587,90 @@ func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Script_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "hcl_id": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "hcl_id": out.Values[i] = ec._Script_hcl_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "name": - out.Values[i] = ec._Script_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "language": - out.Values[i] = ec._Script_language(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "description": - out.Values[i] = ec._Script_description(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "source": - out.Values[i] = ec._Script_source(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "source_type": - out.Values[i] = ec._Script_source_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "cooldown": - out.Values[i] = ec._Script_cooldown(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "timeout": - out.Values[i] = ec._Script_timeout(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "ignore_errors": - out.Values[i] = ec._Script_ignore_errors(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "args": - out.Values[i] = ec._Script_args(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "disabled": - out.Values[i] = ec._Script_disabled(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "vars": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34417,21 +36680,35 @@ func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, o return res } - out.Concurrently(i, func() graphql.Marshaler { - 
return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "absPath": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "absPath": out.Values[i] = ec._Script_absPath(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "tags": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34441,14 +36718,30 @@ func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, o return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "scriptToFinding": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34456,19 +36749,35 @@ func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Script_scriptToFinding(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ScriptToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34476,23 +36785,51 @@ func (ec *executionContext) _Script(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Script_ScriptToEnvironment(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - 
out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34500,8 +36837,9 @@ var serverTaskImplementors = []string{"ServerTask"} func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSet, obj *ent.ServerTask) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, serverTaskImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -34509,7 +36847,7 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34517,19 +36855,35 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe }() res = ec._ServerTask_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "type": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34537,35 +36891,43 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe }() res = ec._ServerTask_type(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = 
graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "start_time": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "start_time": out.Values[i] = ec._ServerTask_start_time(ctx, field, obj) - case "end_time": - out.Values[i] = ec._ServerTask_end_time(ctx, field, obj) - case "errors": - out.Values[i] = ec._ServerTask_errors(ctx, field, obj) - case "log_file_path": - out.Values[i] = ec._ServerTask_log_file_path(ctx, field, obj) - case "ServerTaskToAuthUser": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34573,19 +36935,35 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe }() res = ec._ServerTask_ServerTaskToAuthUser(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ServerTaskToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34593,19 +36971,35 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe }() res = ec._ServerTask_ServerTaskToStatus(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ServerTaskToEnvironment": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34615,14 +37009,30 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe return res } - 
out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ServerTaskToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34632,14 +37042,30 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ServerTaskToBuildCommit": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34649,18 +37075,46 @@ func (ec *executionContext) _ServerTask(ctx context.Context, sel ast.SelectionSe return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34668,8 +37122,9 @@ var statusImplementors = []string{"Status"} func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, obj *ent.Status) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, statusImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case 
"__typename": @@ -34677,7 +37132,7 @@ func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, o case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34685,19 +37140,35 @@ func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Status_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "state": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34705,19 +37176,35 @@ func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Status_state(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "status_for": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34725,19 +37212,35 @@ func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Status_status_for(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } - }) + out.Concurrently(i, func(ctx 
context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "started_at": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34745,19 +37248,35 @@ func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Status_started_at(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "ended_at": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34765,41 +37284,63 @@ func (ec *executionContext) _Status(ctx context.Context, sel ast.SelectionSet, o }() res = ec._Status_ended_at(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "failed": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "failed": out.Values[i] = ec._Status_failed(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "completed": - out.Values[i] = ec._Status_completed(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "error": - out.Values[i] = ec._Status_error(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34807,34 +37348,43 @@ var statusBatchImplementors = []string{"StatusBatch"} func (ec *executionContext) _StatusBatch(ctx context.Context, sel ast.SelectionSet, obj *model.StatusBatch) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, 
statusBatchImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("StatusBatch") case "statuses": - out.Values[i] = ec._StatusBatch_statuses(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "pageInfo": - out.Values[i] = ec._StatusBatch_pageInfo(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -34874,8 +37424,9 @@ var teamImplementors = []string{"Team"} func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj *ent.Team) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, teamImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -34883,7 +37434,7 @@ func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34891,26 +37442,40 @@ func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Team_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "team_number": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "team_number": out.Values[i] = ec._Team_team_number(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "TeamToBuild": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34918,19 +37483,35 @@ func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Team_TeamToBuild(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + 
if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "TeamToStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34938,19 +37519,35 @@ func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Team_TeamToStatus(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "TeamToProvisionedNetwork": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34958,19 +37555,35 @@ func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Team_TeamToProvisionedNetwork(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "TeamToPlan": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -34978,23 +37591,51 @@ func (ec *executionContext) _Team(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._Team_TeamToPlan(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + 
dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35002,8 +37643,9 @@ var userImplementors = []string{"User"} func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj *ent.User) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, userImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": @@ -35011,7 +37653,7 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj case "id": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -35019,44 +37661,66 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj }() res = ec._User_id(ctx, field, obj) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } - out.Concurrently(i, func() graphql.Marshaler { - return innerFunc(ctx) + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) - }) - case "name": + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "name": out.Values[i] = ec._User_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "uuid": - out.Values[i] = ec._User_uuid(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } case "email": - out.Values[i] = ec._User_email(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&out.Invalids, 1) } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35064,52 +37728,55 @@ var 
__DirectiveImplementors = []string{"__Directive"} func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Directive") case "name": - out.Values[i] = ec.___Directive_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___Directive_description(ctx, field, obj) - case "locations": - out.Values[i] = ec.___Directive_locations(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "args": - out.Values[i] = ec.___Directive_args(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "isRepeatable": - out.Values[i] = ec.___Directive_isRepeatable(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35117,42 +37784,47 @@ var __EnumValueImplementors = []string{"__EnumValue"} func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__EnumValue") case "name": - out.Values[i] = ec.___EnumValue_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___EnumValue_description(ctx, field, obj) - case "isDeprecated": - out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "deprecationReason": - out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35160,56 +37832,57 @@ var __FieldImplementors = []string{"__Field"} func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Field") case "name": - out.Values[i] = ec.___Field_name(ctx, 
field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___Field_description(ctx, field, obj) - case "args": - out.Values[i] = ec.___Field_args(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "type": - out.Values[i] = ec.___Field_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "isDeprecated": - out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "deprecationReason": - out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35217,42 +37890,47 @@ var __InputValueImplementors = []string{"__InputValue"} func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__InputValue") case "name": - out.Values[i] = ec.___InputValue_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___InputValue_description(ctx, field, obj) - case "type": - out.Values[i] = ec.___InputValue_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "defaultValue": - out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35260,53 +37938,54 @@ var __SchemaImplementors = []string{"__Schema"} func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Schema") case "description": - out.Values[i] = ec.___Schema_description(ctx, field, obj) - case "types": - out.Values[i] = ec.___Schema_types(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "queryType": - out.Values[i] = ec.___Schema_queryType(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "mutationType": - out.Values[i] = ec.___Schema_mutationType(ctx, field, obj) - case "subscriptionType": - out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj) - case 
"directives": - out.Values[i] = ec.___Schema_directives(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35314,63 +37993,56 @@ var __TypeImplementors = []string{"__Type"} func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Type") case "kind": - out.Values[i] = ec.___Type_kind(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "name": - out.Values[i] = ec.___Type_name(ctx, field, obj) - case "description": - out.Values[i] = ec.___Type_description(ctx, field, obj) - case "fields": - out.Values[i] = ec.___Type_fields(ctx, field, obj) - case "interfaces": - out.Values[i] = ec.___Type_interfaces(ctx, field, obj) - case "possibleTypes": - out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj) - case "enumValues": - out.Values[i] = ec.___Type_enumValues(ctx, field, obj) - case "inputFields": - out.Values[i] = ec.___Type_inputFields(ctx, field, obj) - case "ofType": - out.Values[i] = ec.___Type_ofType(ctx, field, obj) - case "specifiedByURL": - out.Values[i] = ec.___Type_specifiedByURL(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35378,34 +38050,43 @@ var configMapImplementors = []string{"configMap"} func (ec *executionContext) _configMap(ctx context.Context, sel ast.SelectionSet, obj *model.ConfigMap) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, configMapImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("configMap") case "key": - out.Values[i] = ec._configMap_key(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "value": - out.Values[i] = ec._configMap_value(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35413,34 +38094,43 @@ var intMapImplementors = 
[]string{"intMap"} func (ec *executionContext) _intMap(ctx context.Context, sel ast.SelectionSet, obj *model.IntMap) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, intMapImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("intMap") case "key": - out.Values[i] = ec._intMap_key(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "value": - out.Values[i] = ec._intMap_value(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35448,34 +38138,43 @@ var tagMapImplementors = []string{"tagMap"} func (ec *executionContext) _tagMap(ctx context.Context, sel ast.SelectionSet, obj *model.TagMap) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, tagMapImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("tagMap") case "key": - out.Values[i] = ec._tagMap_key(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "value": - out.Values[i] = ec._tagMap_value(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -35483,34 +38182,43 @@ var varsMapImplementors = []string{"varsMap"} func (ec *executionContext) _varsMap(ctx context.Context, sel ast.SelectionSet, obj *model.VarsMap) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, varsMapImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("varsMap") case "key": - out.Values[i] = ec._varsMap_key(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "value": - out.Values[i] = ec._varsMap_value(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } diff --git a/graphql/graph/schema.resolvers.go 
b/graphql/graph/schema.resolvers.go index 6cae2433..d71d5ebe 100755 --- a/graphql/graph/schema.resolvers.go +++ b/graphql/graph/schema.resolvers.go @@ -2,6 +2,7 @@ package graph // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. +// Code generated by github.com/99designs/gqlgen version v0.17.41 import ( "context" @@ -40,30 +41,37 @@ import ( "golang.org/x/crypto/bcrypt" ) +// ID is the resolver for the id field. func (r *agentTaskResolver) ID(ctx context.Context, obj *ent.AgentTask) (string, error) { return obj.ID.String(), nil } +// Command is the resolver for the command field. func (r *agentTaskResolver) Command(ctx context.Context, obj *ent.AgentTask) (model.AgentCommand, error) { return model.AgentCommand(obj.Command), nil } +// State is the resolver for the state field. func (r *agentTaskResolver) State(ctx context.Context, obj *ent.AgentTask) (model.AgentTaskState, error) { return model.AgentTaskState(obj.State), nil } +// ID is the resolver for the id field. func (r *authUserResolver) ID(ctx context.Context, obj *ent.AuthUser) (string, error) { return obj.ID.String(), nil } +// Role is the resolver for the role field. func (r *authUserResolver) Role(ctx context.Context, obj *ent.AuthUser) (model.RoleLevel, error) { return model.RoleLevel(obj.Role), nil } +// Provider is the resolver for the provider field. func (r *authUserResolver) Provider(ctx context.Context, obj *ent.AuthUser) (model.ProviderType, error) { return model.ProviderType(obj.Provider), nil } +// PublicKey is the resolver for the publicKey field. func (r *authUserResolver) PublicKey(ctx context.Context, obj *ent.AuthUser) (string, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -81,26 +89,32 @@ func (r *authUserResolver) PublicKey(ctx context.Context, obj *ent.AuthUser) (st return text, nil } +// ID is the resolver for the id field. func (r *buildResolver) ID(ctx context.Context, obj *ent.Build) (string, error) { return obj.ID.String(), nil } +// ID is the resolver for the id field. func (r *buildCommitResolver) ID(ctx context.Context, obj *ent.BuildCommit) (string, error) { return obj.ID.String(), nil } +// Type is the resolver for the type field. func (r *buildCommitResolver) Type(ctx context.Context, obj *ent.BuildCommit) (model.BuildCommitType, error) { return model.BuildCommitType(obj.Type), nil } +// State is the resolver for the state field. func (r *buildCommitResolver) State(ctx context.Context, obj *ent.BuildCommit) (model.BuildCommitState, error) { return model.BuildCommitState(obj.State), nil } +// ID is the resolver for the id field. func (r *commandResolver) ID(ctx context.Context, obj *ent.Command) (string, error) { return obj.ID.String(), nil } +// Vars is the resolver for the vars field. func (r *commandResolver) Vars(ctx context.Context, obj *ent.Command) ([]*model.VarsMap, error) { results := make([]*model.VarsMap, 0) for varKey, varValue := range obj.Vars { @@ -113,6 +127,7 @@ func (r *commandResolver) Vars(ctx context.Context, obj *ent.Command) ([]*model. return results, nil } +// Tags is the resolver for the tags field. func (r *commandResolver) Tags(ctx context.Context, obj *ent.Command) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -125,10 +140,12 @@ func (r *commandResolver) Tags(ctx context.Context, obj *ent.Command) ([]*model. 
return results, nil } +// ID is the resolver for the id field. func (r *competitionResolver) ID(ctx context.Context, obj *ent.Competition) (string, error) { return obj.ID.String(), nil } +// Config is the resolver for the config field. func (r *competitionResolver) Config(ctx context.Context, obj *ent.Competition) ([]*model.ConfigMap, error) { results := make([]*model.ConfigMap, 0) for configKey, configValue := range obj.Config { @@ -141,6 +158,7 @@ func (r *competitionResolver) Config(ctx context.Context, obj *ent.Competition) return results, nil } +// Tags is the resolver for the tags field. func (r *competitionResolver) Tags(ctx context.Context, obj *ent.Competition) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -153,10 +171,12 @@ func (r *competitionResolver) Tags(ctx context.Context, obj *ent.Competition) ([ return results, nil } +// ID is the resolver for the id field. func (r *dNSResolver) ID(ctx context.Context, obj *ent.DNS) (string, error) { return obj.ID.String(), nil } +// Config is the resolver for the config field. func (r *dNSResolver) Config(ctx context.Context, obj *ent.DNS) ([]*model.ConfigMap, error) { results := make([]*model.ConfigMap, 0) for configKey, configValue := range obj.Config { @@ -169,10 +189,12 @@ func (r *dNSResolver) Config(ctx context.Context, obj *ent.DNS) ([]*model.Config return results, nil } +// ID is the resolver for the id field. func (r *dNSRecordResolver) ID(ctx context.Context, obj *ent.DNSRecord) (string, error) { return obj.ID.String(), nil } +// Vars is the resolver for the vars field. func (r *dNSRecordResolver) Vars(ctx context.Context, obj *ent.DNSRecord) ([]*model.VarsMap, error) { results := make([]*model.VarsMap, 0) for varKey, varValue := range obj.Vars { @@ -185,6 +207,7 @@ func (r *dNSRecordResolver) Vars(ctx context.Context, obj *ent.DNSRecord) ([]*mo return results, nil } +// Tags is the resolver for the tags field. func (r *dNSRecordResolver) Tags(ctx context.Context, obj *ent.DNSRecord) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -197,10 +220,12 @@ func (r *dNSRecordResolver) Tags(ctx context.Context, obj *ent.DNSRecord) ([]*mo return results, nil } +// ID is the resolver for the id field. func (r *environmentResolver) ID(ctx context.Context, obj *ent.Environment) (string, error) { return obj.ID.String(), nil } +// Config is the resolver for the config field. func (r *environmentResolver) Config(ctx context.Context, obj *ent.Environment) ([]*model.ConfigMap, error) { results := make([]*model.ConfigMap, 0) for configKey, configValue := range obj.Config { @@ -213,6 +238,7 @@ func (r *environmentResolver) Config(ctx context.Context, obj *ent.Environment) return results, nil } +// Tags is the resolver for the tags field. func (r *environmentResolver) Tags(ctx context.Context, obj *ent.Environment) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -225,10 +251,12 @@ func (r *environmentResolver) Tags(ctx context.Context, obj *ent.Environment) ([ return results, nil } +// ID is the resolver for the id field. func (r *fileDeleteResolver) ID(ctx context.Context, obj *ent.FileDelete) (string, error) { return obj.ID.String(), nil } +// Tags is the resolver for the tags field. 
func (r *fileDeleteResolver) Tags(ctx context.Context, obj *ent.FileDelete) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -241,10 +269,12 @@ func (r *fileDeleteResolver) Tags(ctx context.Context, obj *ent.FileDelete) ([]* return results, nil } +// ID is the resolver for the id field. func (r *fileDownloadResolver) ID(ctx context.Context, obj *ent.FileDownload) (string, error) { return obj.ID.String(), nil } +// Tags is the resolver for the tags field. func (r *fileDownloadResolver) Tags(ctx context.Context, obj *ent.FileDownload) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -257,10 +287,12 @@ func (r *fileDownloadResolver) Tags(ctx context.Context, obj *ent.FileDownload) return results, nil } +// ID is the resolver for the id field. func (r *fileExtractResolver) ID(ctx context.Context, obj *ent.FileExtract) (string, error) { return obj.ID.String(), nil } +// Tags is the resolver for the tags field. func (r *fileExtractResolver) Tags(ctx context.Context, obj *ent.FileExtract) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -273,14 +305,17 @@ func (r *fileExtractResolver) Tags(ctx context.Context, obj *ent.FileExtract) ([ return results, nil } +// Severity is the resolver for the severity field. func (r *findingResolver) Severity(ctx context.Context, obj *ent.Finding) (model.FindingSeverity, error) { return model.FindingSeverity(obj.Severity), nil } +// Difficulty is the resolver for the difficulty field. func (r *findingResolver) Difficulty(ctx context.Context, obj *ent.Finding) (model.FindingDifficulty, error) { return model.FindingDifficulty(obj.Difficulty), nil } +// Tags is the resolver for the tags field. func (r *findingResolver) Tags(ctx context.Context, obj *ent.Finding) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -293,10 +328,12 @@ func (r *findingResolver) Tags(ctx context.Context, obj *ent.Finding) ([]*model. return results, nil } +// ID is the resolver for the id field. func (r *hostResolver) ID(ctx context.Context, obj *ent.Host) (string, error) { return obj.ID.String(), nil } +// Vars is the resolver for the vars field. func (r *hostResolver) Vars(ctx context.Context, obj *ent.Host) ([]*model.VarsMap, error) { results := make([]*model.VarsMap, 0) for varKey, varValue := range obj.Vars { @@ -309,6 +346,7 @@ func (r *hostResolver) Vars(ctx context.Context, obj *ent.Host) ([]*model.VarsMa return results, nil } +// Tags is the resolver for the tags field. func (r *hostResolver) Tags(ctx context.Context, obj *ent.Host) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -321,10 +359,12 @@ func (r *hostResolver) Tags(ctx context.Context, obj *ent.Host) ([]*model.TagMap return results, nil } +// ID is the resolver for the id field. func (r *identityResolver) ID(ctx context.Context, obj *ent.Identity) (string, error) { return obj.ID.String(), nil } +// Vars is the resolver for the vars field. func (r *identityResolver) Vars(ctx context.Context, obj *ent.Identity) ([]*model.VarsMap, error) { results := make([]*model.VarsMap, 0) for varKey, varValue := range obj.Vars { @@ -337,6 +377,7 @@ func (r *identityResolver) Vars(ctx context.Context, obj *ent.Identity) ([]*mode return results, nil } +// Tags is the resolver for the tags field. 
func (r *identityResolver) Tags(ctx context.Context, obj *ent.Identity) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -349,6 +390,7 @@ func (r *identityResolver) Tags(ctx context.Context, obj *ent.Identity) ([]*mode return results, nil } +// LoadEnvironment is the resolver for the loadEnvironment field. func (r *mutationResolver) LoadEnvironment(ctx context.Context, envFilePath string) ([]*ent.Environment, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -382,6 +424,7 @@ func (r *mutationResolver) LoadEnvironment(ctx context.Context, envFilePath stri return results, nil } +// CreateBuild is the resolver for the createBuild field. func (r *mutationResolver) CreateBuild(ctx context.Context, envUUID string, renderFiles bool) (*ent.Build, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -410,6 +453,7 @@ func (r *mutationResolver) CreateBuild(ctx context.Context, envUUID string, rend return planner.CreateBuild(ctx, r.client, r.rdb, r.laforgeConfig, currentUser, entEnvironment) } +// DeleteUser is the resolver for the deleteUser field. func (r *mutationResolver) DeleteUser(ctx context.Context, userUUID string) (bool, error) { uuid, err := uuid.Parse(userUUID) @@ -425,6 +469,7 @@ func (r *mutationResolver) DeleteUser(ctx context.Context, userUUID string) (boo return true, err } +// ExecutePlan is the resolver for the executePlan field. func (r *mutationResolver) ExecutePlan(ctx context.Context, buildUUID string) (*ent.Build, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -472,6 +517,7 @@ func (r *mutationResolver) ExecutePlan(ctx context.Context, buildUUID string) (* return b, nil } +// DeleteBuild is the resolver for the deleteBuild field. func (r *mutationResolver) DeleteBuild(ctx context.Context, buildUUID string) (string, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -532,6 +578,7 @@ func (r *mutationResolver) DeleteBuild(ctx context.Context, buildUUID string) (s return "", fmt.Errorf("unknown error occurred") } +// CreateTask is the resolver for the createTask field. func (r *mutationResolver) CreateTask(ctx context.Context, proHostUUID string, command model.AgentCommand, args string) (bool, error) { uuid, err := uuid.Parse(proHostUUID) @@ -560,6 +607,7 @@ func (r *mutationResolver) CreateTask(ctx context.Context, proHostUUID string, c return true, nil } +// DumpBuild is the resolver for the dumpBuild field. func (r *mutationResolver) DumpBuild(ctx context.Context, buildUUID string) (string, error) { uuid, err := uuid.Parse(buildUUID) @@ -576,6 +624,7 @@ func (r *mutationResolver) DumpBuild(ctx context.Context, buildUUID string) (str return utils.GenerateBuildConf(ctx, r.client, entBuild) } +// Rebuild is the resolver for the rebuild field. func (r *mutationResolver) Rebuild(ctx context.Context, rootPlans []*string) (bool, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -633,6 +682,7 @@ func (r *mutationResolver) Rebuild(ctx context.Context, rootPlans []*string) (bo return false, nil } +// ApproveCommit is the resolver for the approveCommit field. func (r *mutationResolver) ApproveCommit(ctx context.Context, commitUUID string) (bool, error) { uuid, err := uuid.Parse(commitUUID) if err != nil { @@ -646,6 +696,7 @@ func (r *mutationResolver) ApproveCommit(ctx context.Context, commitUUID string) return true, nil } +// CancelCommit is the resolver for the cancelCommit field. 
func (r *mutationResolver) CancelCommit(ctx context.Context, commitUUID string) (bool, error) { uuid, err := uuid.Parse(commitUUID) if err != nil { @@ -682,6 +733,7 @@ func (r *mutationResolver) CancelCommit(ctx context.Context, commitUUID string) return true, nil } +// CreateAgentTasks is the resolver for the createAgentTasks field. func (r *mutationResolver) CreateAgentTasks(ctx context.Context, hostHclid string, command model.AgentCommand, buildUUID string, args []string, teams []int) ([]*ent.AgentTask, error) { uuid, err := uuid.Parse(buildUUID) @@ -702,7 +754,7 @@ func (r *mutationResolver) CreateAgentTasks(ctx context.Context, hostHclid strin if err != nil { return nil, fmt.Errorf("failed querying team: %v", err) } - entProvisionedHost, err := entTeam.QueryTeamToProvisionedNetwork().QueryProvisionedNetworkToProvisionedHost().Where(provisionedhost.HasProvisionedHostToHostWith(host.HclIDEQ(hostHclid))).All(ctx) + entProvisionedHost, err := entTeam.QueryTeamToProvisionedNetwork().QueryProvisionedNetworkToProvisionedHost().Where(provisionedhost.HasProvisionedHostToHostWith(host.HCLIDEQ(hostHclid))).All(ctx) if err != nil { return nil, fmt.Errorf("failed querying provisoned hosts: %v", err) } @@ -729,6 +781,7 @@ func (r *mutationResolver) CreateAgentTasks(ctx context.Context, hostHclid strin return agentTasksReturn, nil } +// CreateBatchAgentTasks is the resolver for the createBatchAgentTasks field. func (r *mutationResolver) CreateBatchAgentTasks(ctx context.Context, proHostUUIDs []string, command model.AgentCommand, args []string) ([]*ent.AgentTask, error) { agentTasksReturn := []*ent.AgentTask{} @@ -764,6 +817,7 @@ func (r *mutationResolver) CreateBatchAgentTasks(ctx context.Context, proHostUUI return agentTasksReturn, nil } +// CreateEnviromentFromRepo is the resolver for the createEnviromentFromRepo field. func (r *mutationResolver) CreateEnviromentFromRepo(ctx context.Context, repoURL string, branchName string, envFilePath string) ([]*ent.Environment, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -848,6 +902,7 @@ func (r *mutationResolver) CreateEnviromentFromRepo(ctx context.Context, repoURL return loadedEnviroments, nil } +// UpdateEnviromentViaPull is the resolver for the updateEnviromentViaPull field. func (r *mutationResolver) UpdateEnviromentViaPull(ctx context.Context, envUUID string) ([]*ent.Environment, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -904,6 +959,7 @@ func (r *mutationResolver) UpdateEnviromentViaPull(ctx context.Context, envUUID return r.LoadEnvironment(ctx, envPath) } +// CancelBuild is the resolver for the cancelBuild field. func (r *mutationResolver) CancelBuild(ctx context.Context, buildUUID string) (bool, error) { uuid, err := uuid.Parse(buildUUID) @@ -913,6 +969,7 @@ func (r *mutationResolver) CancelBuild(ctx context.Context, buildUUID string) (b return planner.CancelBuild(uuid), nil } +// ModifySelfPassword is the resolver for the modifySelfPassword field. func (r *mutationResolver) ModifySelfPassword(ctx context.Context, currentPassword string, newPassword string) (bool, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -934,6 +991,7 @@ func (r *mutationResolver) ModifySelfPassword(ctx context.Context, currentPasswo } } +// ModifySelfUserInfo is the resolver for the modifySelfUserInfo field. 
func (r *mutationResolver) ModifySelfUserInfo(ctx context.Context, firstName *string, lastName *string, email *string, phone *string, company *string, occupation *string) (*ent.AuthUser, error) { currentUser, err := auth.ForContext(ctx) if err != nil { @@ -990,6 +1048,7 @@ func (r *mutationResolver) ModifySelfUserInfo(ctx context.Context, firstName *st return currentUser, nil } +// CreateUser is the resolver for the createUser field. func (r *mutationResolver) CreateUser(ctx context.Context, username string, password string, role model.RoleLevel, provider model.ProviderType) (*ent.AuthUser, error) { sshFolderPath := fmt.Sprintf(utils.UserKeyPath, strings.ToLower(authuser.ProviderLOCAL.String()), username) @@ -1021,6 +1080,7 @@ func (r *mutationResolver) CreateUser(ctx context.Context, username string, pass return entAuthUser, nil } +// ModifyAdminUserInfo is the resolver for the modifyAdminUserInfo field. func (r *mutationResolver) ModifyAdminUserInfo(ctx context.Context, userID string, username *string, firstName *string, lastName *string, email *string, phone *string, company *string, occupation *string, role *model.RoleLevel, provider *model.ProviderType) (*ent.AuthUser, error) { uuid, err := uuid.Parse(userID) @@ -1104,6 +1164,7 @@ func (r *mutationResolver) ModifyAdminUserInfo(ctx context.Context, userID strin return entAuthUser, nil } +// ModifyAdminPassword is the resolver for the modifyAdminPassword field. func (r *mutationResolver) ModifyAdminPassword(ctx context.Context, userID string, newPassword string) (bool, error) { uuid, err := uuid.Parse(userID) @@ -1128,6 +1189,7 @@ func (r *mutationResolver) ModifyAdminPassword(ctx context.Context, userID strin return true, nil } +// NukeBackend is the resolver for the nukeBackend field. func (r *mutationResolver) NukeBackend(ctx context.Context) ([]*model.IntMap, error) { results := make([]*model.IntMap, 0) returnedResults, err := utils.ClearDB(ctx, r.client, r.laforgeConfig) @@ -1144,10 +1206,12 @@ func (r *mutationResolver) NukeBackend(ctx context.Context) ([]*model.IntMap, er return results, nil } +// ID is the resolver for the id field. func (r *networkResolver) ID(ctx context.Context, obj *ent.Network) (string, error) { return obj.ID.String(), nil } +// Vars is the resolver for the vars field. func (r *networkResolver) Vars(ctx context.Context, obj *ent.Network) ([]*model.VarsMap, error) { results := make([]*model.VarsMap, 0) for varKey, varValue := range obj.Vars { @@ -1160,6 +1224,7 @@ func (r *networkResolver) Vars(ctx context.Context, obj *ent.Network) ([]*model. return results, nil } +// Tags is the resolver for the tags field. func (r *networkResolver) Tags(ctx context.Context, obj *ent.Network) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -1172,26 +1237,32 @@ func (r *networkResolver) Tags(ctx context.Context, obj *ent.Network) ([]*model. return results, nil } +// ID is the resolver for the id field. func (r *planResolver) ID(ctx context.Context, obj *ent.Plan) (string, error) { return obj.ID.String(), nil } +// Type is the resolver for the type field. func (r *planResolver) Type(ctx context.Context, obj *ent.Plan) (model.PlanType, error) { return model.PlanType(obj.Type), nil } +// ID is the resolver for the id field. func (r *planDiffResolver) ID(ctx context.Context, obj *ent.PlanDiff) (string, error) { return obj.ID.String(), nil } +// NewState is the resolver for the new_state field. 
func (r *planDiffResolver) NewState(ctx context.Context, obj *ent.PlanDiff) (model.ProvisionStatus, error) { return model.ProvisionStatus(obj.NewState), nil } +// ID is the resolver for the id field. func (r *provisionedHostResolver) ID(ctx context.Context, obj *ent.ProvisionedHost) (string, error) { return obj.ID.String(), nil } +// ProvisionedHostToAgentStatus is the resolver for the ProvisionedHostToAgentStatus field. func (r *provisionedHostResolver) ProvisionedHostToAgentStatus(ctx context.Context, obj *ent.ProvisionedHost) (*ent.AgentStatus, error) { check, err := obj.QueryProvisionedHostToAgentStatus().Exist(ctx) @@ -1212,18 +1283,22 @@ func (r *provisionedHostResolver) ProvisionedHostToAgentStatus(ctx context.Conte return nil, nil } +// ID is the resolver for the id field. func (r *provisionedNetworkResolver) ID(ctx context.Context, obj *ent.ProvisionedNetwork) (string, error) { return obj.ID.String(), nil } +// ID is the resolver for the id field. func (r *provisioningStepResolver) ID(ctx context.Context, obj *ent.ProvisioningStep) (string, error) { return obj.ID.String(), nil } +// Type is the resolver for the type field. func (r *provisioningStepResolver) Type(ctx context.Context, obj *ent.ProvisioningStep) (model.ProvisioningStepType, error) { return model.ProvisioningStepType(obj.Type), nil } +// Environments is the resolver for the environments field. func (r *queryResolver) Environments(ctx context.Context) ([]*ent.Environment, error) { e, err := r.client.Environment.Query().Order(ent.Asc(environment.FieldID)).All(ctx) @@ -1234,6 +1309,7 @@ func (r *queryResolver) Environments(ctx context.Context) ([]*ent.Environment, e return e, nil } +// Environment is the resolver for the environment field. func (r *queryResolver) Environment(ctx context.Context, envUUID string) (*ent.Environment, error) { uuid, err := uuid.Parse(envUUID) @@ -1250,6 +1326,7 @@ func (r *queryResolver) Environment(ctx context.Context, envUUID string) (*ent.E return e, nil } +// ProvisionedHost is the resolver for the provisionedHost field. func (r *queryResolver) ProvisionedHost(ctx context.Context, proHostUUID string) (*ent.ProvisionedHost, error) { uuid, err := uuid.Parse(proHostUUID) @@ -1266,6 +1343,7 @@ func (r *queryResolver) ProvisionedHost(ctx context.Context, proHostUUID string) return ph, nil } +// ProvisionedNetwork is the resolver for the provisionedNetwork field. func (r *queryResolver) ProvisionedNetwork(ctx context.Context, proNetUUID string) (*ent.ProvisionedNetwork, error) { uuid, err := uuid.Parse(proNetUUID) @@ -1282,6 +1360,7 @@ func (r *queryResolver) ProvisionedNetwork(ctx context.Context, proNetUUID strin return pn, nil } +// ProvisionedStep is the resolver for the provisionedStep field. func (r *queryResolver) ProvisionedStep(ctx context.Context, proStepUUID string) (*ent.ProvisioningStep, error) { uuid, err := uuid.Parse(proStepUUID) @@ -1298,6 +1377,7 @@ func (r *queryResolver) ProvisionedStep(ctx context.Context, proStepUUID string) return ps, nil } +// Plan is the resolver for the plan field. func (r *queryResolver) Plan(ctx context.Context, planUUID string) (*ent.Plan, error) { uuid, err := uuid.Parse(planUUID) @@ -1314,6 +1394,7 @@ func (r *queryResolver) Plan(ctx context.Context, planUUID string) (*ent.Plan, e return plan, nil } +// GetBuilds is the resolver for the getBuilds field. 
func (r *queryResolver) GetBuilds(ctx context.Context) ([]*ent.Build, error) { builds, err := r.client.Environment.Query().Order(ent.Asc(environment.FieldID)).QueryEnvironmentToBuild().All(ctx) @@ -1324,6 +1405,7 @@ func (r *queryResolver) GetBuilds(ctx context.Context) ([]*ent.Build, error) { return builds, nil } +// Build is the resolver for the build field. func (r *queryResolver) Build(ctx context.Context, buildUUID string) (*ent.Build, error) { uuid, err := uuid.Parse(buildUUID) @@ -1340,6 +1422,7 @@ func (r *queryResolver) Build(ctx context.Context, buildUUID string) (*ent.Build return build, nil } +// GetBuildCommits is the resolver for the getBuildCommits field. func (r *queryResolver) GetBuildCommits(ctx context.Context, envUUID string) ([]*ent.BuildCommit, error) { uuid, err := uuid.Parse(envUUID) @@ -1356,6 +1439,7 @@ func (r *queryResolver) GetBuildCommits(ctx context.Context, envUUID string) ([] return buildCommits, nil } +// GetBuildCommit is the resolver for the getBuildCommit field. func (r *queryResolver) GetBuildCommit(ctx context.Context, buildCommitUUID string) (*ent.BuildCommit, error) { uuid, err := uuid.Parse(buildCommitUUID) if err != nil { @@ -1370,6 +1454,7 @@ func (r *queryResolver) GetBuildCommit(ctx context.Context, buildCommitUUID stri return buildCommit, nil } +// Status is the resolver for the status field. func (r *queryResolver) Status(ctx context.Context, statusUUID string) (*ent.Status, error) { uuid, err := uuid.Parse(statusUUID) @@ -1386,6 +1471,7 @@ func (r *queryResolver) Status(ctx context.Context, statusUUID string) (*ent.Sta return status, nil } +// AgentStatus is the resolver for the agentStatus field. func (r *queryResolver) AgentStatus(ctx context.Context, clientID string) (*ent.AgentStatus, error) { uuid, err := uuid.Parse(clientID) @@ -1405,6 +1491,7 @@ func (r *queryResolver) AgentStatus(ctx context.Context, clientID string) (*ent. return status, nil } +// GetServerTasks is the resolver for the getServerTasks field. func (r *queryResolver) GetServerTasks(ctx context.Context) ([]*ent.ServerTask, error) { serverTasks, err := r.client.ServerTask.Query().All(ctx) if err != nil { @@ -1413,18 +1500,22 @@ func (r *queryResolver) GetServerTasks(ctx context.Context) ([]*ent.ServerTask, return serverTasks, nil } +// CurrentUser is the resolver for the currentUser field. func (r *queryResolver) CurrentUser(ctx context.Context) (*ent.AuthUser, error) { return auth.ForContext(ctx) } +// GetUserList is the resolver for the getUserList field. func (r *queryResolver) GetUserList(ctx context.Context) ([]*ent.AuthUser, error) { return r.client.AuthUser.Query().All(ctx) } +// GetCurrentUserTasks is the resolver for the getCurrentUserTasks field. func (r *queryResolver) GetCurrentUserTasks(ctx context.Context) ([]*ent.ServerTask, error) { return r.client.AuthUser.Query().QueryAuthUserToServerTasks().All(ctx) } +// GetAgentTasks is the resolver for the getAgentTasks field. func (r *queryResolver) GetAgentTasks(ctx context.Context, proStepUUID string) ([]*ent.AgentTask, error) { uuid, err := uuid.Parse(proStepUUID) if err != nil { @@ -1444,6 +1535,7 @@ func (r *queryResolver) GetAgentTasks(ctx context.Context, proStepUUID string) ( return agentTasks, err } +// ListAgentStatuses is the resolver for the listAgentStatuses field. 
func (r *queryResolver) ListAgentStatuses(ctx context.Context, buildUUID string) ([]*ent.AgentStatus, error) { uuid, err := uuid.Parse(buildUUID) if err != nil { @@ -1458,6 +1550,7 @@ func (r *queryResolver) ListAgentStatuses(ctx context.Context, buildUUID string) return agentStatuses, nil } +// ListBuildStatuses is the resolver for the listBuildStatuses field. func (r *queryResolver) ListBuildStatuses(ctx context.Context, buildUUID string) ([]*ent.Status, error) { uuid, err := uuid.Parse(buildUUID) if err != nil { @@ -1505,6 +1598,7 @@ func (r *queryResolver) ListBuildStatuses(ctx context.Context, buildUUID string) return statuses, nil } +// GetAllAgentStatus is the resolver for the getAllAgentStatus field. func (r *queryResolver) GetAllAgentStatus(ctx context.Context, buildUUID string, count int, offset int) (*model.AgentStatusBatch, error) { uuid, err := uuid.Parse(buildUUID) if err != nil { @@ -1530,6 +1624,7 @@ func (r *queryResolver) GetAllAgentStatus(ctx context.Context, buildUUID string, }, nil } +// GetAllPlanStatus is the resolver for the getAllPlanStatus field. func (r *queryResolver) GetAllPlanStatus(ctx context.Context, buildUUID string, count int, offset int) (*model.StatusBatch, error) { uuid, err := uuid.Parse(buildUUID) if err != nil { @@ -1555,6 +1650,7 @@ func (r *queryResolver) GetAllPlanStatus(ctx context.Context, buildUUID string, }, nil } +// GetPlanStatusCounts is the resolver for the getPlanStatusCounts field. func (r *queryResolver) GetPlanStatusCounts(ctx context.Context, buildUUID string) (*model.PlanCounts, error) { uuid, err := uuid.Parse(buildUUID) if err != nil { @@ -1631,6 +1727,7 @@ func (r *queryResolver) GetPlanStatusCounts(ctx context.Context, buildUUID strin }, nil } +// ViewServerTaskLogs is the resolver for the viewServerTaskLogs field. func (r *queryResolver) ViewServerTaskLogs(ctx context.Context, taskID string) (string, error) { uuid, err := uuid.Parse(taskID) @@ -1652,6 +1749,7 @@ func (r *queryResolver) ViewServerTaskLogs(ctx context.Context, taskID string) ( return fileString, nil } +// ViewAgentTask is the resolver for the viewAgentTask field. func (r *queryResolver) ViewAgentTask(ctx context.Context, taskID string) (*ent.AgentTask, error) { uuid, err := uuid.Parse(taskID) @@ -1662,6 +1760,7 @@ func (r *queryResolver) ViewAgentTask(ctx context.Context, taskID string) (*ent. return r.client.AgentTask.Get(ctx, uuid) } +// ServerTasks is the resolver for the serverTasks field. func (r *queryResolver) ServerTasks(ctx context.Context, taskUUIDs []*string) ([]*ent.ServerTask, error) { uuids := make([]uuid.UUID, 0) for _, taskUUID := range taskUUIDs { @@ -1680,30 +1779,37 @@ func (r *queryResolver) ServerTasks(ctx context.Context, taskUUIDs []*string) ([ return serverTasks, nil } +// ID is the resolver for the id field. func (r *repoCommitResolver) ID(ctx context.Context, obj *ent.RepoCommit) (string, error) { return obj.ID.String(), nil } +// Author is the resolver for the author field. func (r *repoCommitResolver) Author(ctx context.Context, obj *ent.RepoCommit) (string, error) { return obj.Author.String(), nil } +// Committer is the resolver for the committer field. func (r *repoCommitResolver) Committer(ctx context.Context, obj *ent.RepoCommit) (string, error) { return obj.Committer.String(), nil } +// ID is the resolver for the id field. func (r *repositoryResolver) ID(ctx context.Context, obj *ent.Repository) (string, error) { return obj.ID.String(), nil } +// EnvironmentFilepath is the resolver for the environment_filepath field. 
func (r *repositoryResolver) EnvironmentFilepath(ctx context.Context, obj *ent.Repository) (string, error) { return obj.EnviromentFilepath, nil } +// ID is the resolver for the id field. func (r *scriptResolver) ID(ctx context.Context, obj *ent.Script) (string, error) { return obj.ID.String(), nil } +// Vars is the resolver for the vars field. func (r *scriptResolver) Vars(ctx context.Context, obj *ent.Script) ([]*model.VarsMap, error) { results := make([]*model.VarsMap, 0) for varKey, varValue := range obj.Vars { @@ -1716,6 +1822,7 @@ func (r *scriptResolver) Vars(ctx context.Context, obj *ent.Script) ([]*model.Va return results, nil } +// Tags is the resolver for the tags field. func (r *scriptResolver) Tags(ctx context.Context, obj *ent.Script) ([]*model.TagMap, error) { results := make([]*model.TagMap, 0) for tagKey, tagValue := range obj.Tags { @@ -1728,34 +1835,42 @@ func (r *scriptResolver) Tags(ctx context.Context, obj *ent.Script) ([]*model.Ta return results, nil } +// ID is the resolver for the id field. func (r *serverTaskResolver) ID(ctx context.Context, obj *ent.ServerTask) (string, error) { return obj.ID.String(), nil } +// Type is the resolver for the type field. func (r *serverTaskResolver) Type(ctx context.Context, obj *ent.ServerTask) (model.ServerTaskType, error) { return model.ServerTaskType(obj.Type), nil } +// ID is the resolver for the id field. func (r *statusResolver) ID(ctx context.Context, obj *ent.Status) (string, error) { return obj.ID.String(), nil } +// State is the resolver for the state field. func (r *statusResolver) State(ctx context.Context, obj *ent.Status) (model.ProvisionStatus, error) { return model.ProvisionStatus(obj.State), nil } +// StatusFor is the resolver for the status_for field. func (r *statusResolver) StatusFor(ctx context.Context, obj *ent.Status) (model.ProvisionStatusFor, error) { return model.ProvisionStatusFor(obj.StatusFor), nil } +// StartedAt is the resolver for the started_at field. func (r *statusResolver) StartedAt(ctx context.Context, obj *ent.Status) (string, error) { return obj.StartedAt.String(), nil } +// EndedAt is the resolver for the ended_at field. func (r *statusResolver) EndedAt(ctx context.Context, obj *ent.Status) (string, error) { return obj.EndedAt.String(), nil } +// UpdatedAgentStatus is the resolver for the updatedAgentStatus field. func (r *subscriptionResolver) UpdatedAgentStatus(ctx context.Context) (<-chan *ent.AgentStatus, error) { newAgentStatus := make(chan *ent.AgentStatus, 1) go func() { @@ -1789,6 +1904,7 @@ func (r *subscriptionResolver) UpdatedAgentStatus(ctx context.Context) (<-chan * return newAgentStatus, nil } +// UpdatedStatus is the resolver for the updatedStatus field. func (r *subscriptionResolver) UpdatedStatus(ctx context.Context) (<-chan *ent.Status, error) { newStatus := make(chan *ent.Status, 1) go func() { @@ -1822,6 +1938,7 @@ func (r *subscriptionResolver) UpdatedStatus(ctx context.Context) (<-chan *ent.S return newStatus, nil } +// UpdatedServerTask is the resolver for the updatedServerTask field. func (r *subscriptionResolver) UpdatedServerTask(ctx context.Context) (<-chan *ent.ServerTask, error) { newServerTask := make(chan *ent.ServerTask, 1) go func() { @@ -1855,6 +1972,7 @@ func (r *subscriptionResolver) UpdatedServerTask(ctx context.Context) (<-chan *e return newServerTask, nil } +// UpdatedBuild is the resolver for the updatedBuild field. 
func (r *subscriptionResolver) UpdatedBuild(ctx context.Context) (<-chan *ent.Build, error) { newBuild := make(chan *ent.Build, 1) go func() { @@ -1888,6 +2006,7 @@ func (r *subscriptionResolver) UpdatedBuild(ctx context.Context) (<-chan *ent.Bu return newBuild, nil } +// UpdatedCommit is the resolver for the updatedCommit field. func (r *subscriptionResolver) UpdatedCommit(ctx context.Context) (<-chan *ent.BuildCommit, error) { newBuildCommit := make(chan *ent.BuildCommit, 1) go func() { @@ -1921,6 +2040,7 @@ func (r *subscriptionResolver) UpdatedCommit(ctx context.Context) (<-chan *ent.B return newBuildCommit, nil } +// UpdatedAgentTask is the resolver for the updatedAgentTask field. func (r *subscriptionResolver) UpdatedAgentTask(ctx context.Context) (<-chan *ent.AgentTask, error) { newAgentTask := make(chan *ent.AgentTask, 1) go func() { @@ -1954,6 +2074,7 @@ func (r *subscriptionResolver) UpdatedAgentTask(ctx context.Context) (<-chan *en return newAgentTask, nil } +// StreamServerTaskLog is the resolver for the streamServerTaskLog field. func (r *subscriptionResolver) StreamServerTaskLog(ctx context.Context, taskID string) (<-chan string, error) { logStream := make(chan string, 1) go func(taskID string, logStream chan<- string) { @@ -2001,10 +2122,12 @@ func (r *subscriptionResolver) StreamServerTaskLog(ctx context.Context, taskID s return logStream, nil } +// ID is the resolver for the id field. func (r *teamResolver) ID(ctx context.Context, obj *ent.Team) (string, error) { return obj.ID.String(), nil } +// ID is the resolver for the id field. func (r *userResolver) ID(ctx context.Context, obj *ent.User) (string, error) { return obj.ID.String(), nil } diff --git a/loader/parser.go b/loader/parser.go index 9557b1fe..a38417c1 100755 --- a/loader/parser.go +++ b/loader/parser.go @@ -93,6 +93,7 @@ type Loader struct { // FileGlobResolver is a modified FileResolver in the HCLv2 include extension that accounts for globbed // includes: +// // include { // path = "./foo/*.laforge" // } @@ -263,9 +264,9 @@ func (l *Loader) merger(filenames []string) (*DefinedConfigs, error) { for _, filename := range filenames { element := l.ConfigMap[filename] for _, x := range element.DefinedCompetitions { - obj, found := combinedConfigs.Competitions[x.HclID] + obj, found := combinedConfigs.Competitions[x.HCLID] if !found { - combinedConfigs.Competitions[x.HclID] = x + combinedConfigs.Competitions[x.HCLID] = x continue } if x.RootPassword != "" { @@ -280,19 +281,19 @@ func (l *Loader) merger(filenames []string) (*DefinedConfigs, error) { if x.HCLCompetitionToDNS != nil { obj.HCLCompetitionToDNS = x.HCLCompetitionToDNS } - combinedConfigs.Competitions[x.HclID] = obj + combinedConfigs.Competitions[x.HCLID] = obj } for _, x := range element.DefinedHosts { - _, found := combinedConfigs.Hosts[x.HclID] + _, found := combinedConfigs.Hosts[x.HCLID] if !found { - combinedConfigs.Hosts[x.HclID] = x + combinedConfigs.Hosts[x.HCLID] = x continue } } for _, x := range element.DefinedNetworks { - _, found := combinedConfigs.Networks[x.HclID] + _, found := combinedConfigs.Networks[x.HCLID] if !found { - combinedConfigs.Networks[x.HclID] = x + combinedConfigs.Networks[x.HCLID] = x continue } } @@ -302,53 +303,53 @@ func (l *Loader) merger(filenames []string) (*DefinedConfigs, error) { absPath := path.Join(dir, x.Source) x.AbsPath = absPath } - _, found := combinedConfigs.Scripts[x.HclID] + _, found := combinedConfigs.Scripts[x.HCLID] if !found { - combinedConfigs.Scripts[x.HclID] = x + combinedConfigs.Scripts[x.HCLID] = x 
continue } } for _, x := range element.DefinedCommands { - _, found := combinedConfigs.Commands[x.HclID] + _, found := combinedConfigs.Commands[x.HCLID] if !found { - combinedConfigs.Commands[x.HclID] = x + combinedConfigs.Commands[x.HCLID] = x continue } } for _, x := range element.DefinedDNSRecords { - _, found := combinedConfigs.DNSRecords[x.HclID] + _, found := combinedConfigs.DNSRecords[x.HCLID] if !found { - combinedConfigs.DNSRecords[x.HclID] = x + combinedConfigs.DNSRecords[x.HCLID] = x continue } } for _, x := range element.DefinedEnvironments { - _, found := combinedConfigs.Environments[x.HclID] + _, found := combinedConfigs.Environments[x.HCLID] if !found { - combinedConfigs.Environments[x.HclID] = x + combinedConfigs.Environments[x.HCLID] = x continue } } for _, x := range element.DefinedFileDownload { - _, found := combinedConfigs.FileDownload[x.HclID] + _, found := combinedConfigs.FileDownload[x.HCLID] dir := path.Dir(element.Filename) absPath := path.Join(dir, x.Source) x.AbsPath = absPath if !found { - combinedConfigs.FileDownload[x.HclID] = x + combinedConfigs.FileDownload[x.HCLID] = x continue } } for _, x := range element.DefinedFileDelete { - element.FileDelete[x.HclID] = x + element.FileDelete[x.HCLID] = x } for _, x := range element.DefinedFileExtract { - element.FileExtract[x.HclID] = x + element.FileExtract[x.HCLID] = x } for _, x := range element.DefinedIdentities { - _, found := combinedConfigs.Identities[x.HclID] + _, found := combinedConfigs.Identities[x.HCLID] if !found { - combinedConfigs.Identities[x.HclID] = x + combinedConfigs.Identities[x.HCLID] = x continue } } @@ -356,9 +357,9 @@ func (l *Loader) merger(filenames []string) (*DefinedConfigs, error) { dir := path.Dir(element.Filename) absPath := path.Join(dir, x.Source) x.AbsPath = absPath - _, found := combinedConfigs.Ansible[x.HclID] + _, found := combinedConfigs.Ansible[x.HCLID] if !found { - combinedConfigs.Ansible[x.HclID] = x + combinedConfigs.Ansible[x.HCLID] = x continue } } @@ -414,95 +415,95 @@ func createEnviroments(ctx context.Context, client *ent.Client, log *logging.Log } returnedEnvironment := []*ent.Environment{} for _, cEnviroment := range configEnvs { - log.Log.Debugf("Creating ENV: %v", cEnviroment.HclID) + log.Log.Debugf("Creating ENV: %v", cEnviroment.HCLID) environmentHosts := []string{} for _, cIncludedNetwork := range cEnviroment.HCLEnvironmentToIncludedNetwork { environmentHosts = append(environmentHosts, cIncludedNetwork.Hosts...) 
} - returnedCompetitions, returnedDNS, err := createCompetitions(txClient, ctx, log, loadedConfig.Competitions, cEnviroment.HclID) + returnedCompetitions, returnedDNS, err := createCompetitions(txClient, ctx, log, loadedConfig.Competitions, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in competition into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in competition into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedScripts, returnedFindings, err := createScripts(txClient, ctx, log, loadedConfig.Scripts, cEnviroment.HclID) + returnedScripts, returnedFindings, err := createScripts(txClient, ctx, log, loadedConfig.Scripts, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in findings into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in findings into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedCommands, err := createCommands(txClient, ctx, log, loadedConfig.Commands, cEnviroment.HclID) + returnedCommands, err := createCommands(txClient, ctx, log, loadedConfig.Commands, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in commands into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in commands into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedDNSRecords, err := createDNSRecords(txClient, ctx, log, loadedConfig.DNSRecords, cEnviroment.HclID) + returnedDNSRecords, err := createDNSRecords(txClient, ctx, log, loadedConfig.DNSRecords, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in dns_records into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in dns_records into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedFileDownloads, err := createFileDownload(txClient, ctx, log, loadedConfig.FileDownload, cEnviroment.HclID) + returnedFileDownloads, err := createFileDownload(txClient, ctx, log, loadedConfig.FileDownload, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in file_downloads into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in file_downloads into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedFileDeletes, err := createFileDelete(txClient, ctx, log, loadedConfig.FileDelete, cEnviroment.HclID) + returnedFileDeletes, err := createFileDelete(txClient, ctx, log, loadedConfig.FileDelete, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in file_delets into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in file_delets into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedFileExtracts, err := createFileExtract(txClient, ctx, log, loadedConfig.FileExtract, cEnviroment.HclID) + returnedFileExtracts, err := createFileExtract(txClient, ctx, log, loadedConfig.FileExtract, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in file_extracts into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in file_extracts into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedIdentities, err := createIdentities(txClient, ctx, log, loadedConfig.Identities, cEnviroment.HclID) + returnedIdentities, err := createIdentities(txClient, 
ctx, log, loadedConfig.Identities, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in identities into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in identities into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } - returnedNetworks, err := createNetworks(txClient, ctx, log, loadedConfig.Networks, cEnviroment.HCLEnvironmentToIncludedNetwork, cEnviroment.HclID) + returnedNetworks, err := createNetworks(txClient, ctx, log, loadedConfig.Networks, cEnviroment.HCLEnvironmentToIncludedNetwork, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in competition into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in competition into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } returnedNetworkIDs := getNetworkIDs(returnedNetworks) - returnedAnsible, err := createAnsible(txClient, ctx, log, loadedConfig.Ansible, cEnviroment.HclID) + returnedAnsible, err := createAnsible(txClient, ctx, log, loadedConfig.Ansible, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in Ansible into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in Ansible into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } // returnedHostDependencies is empty if ran once but ok when ran multiple times - returnedHosts, returnedHostDependencies, err := createHosts(txClient, ctx, log, loadedConfig.Hosts, cEnviroment.HclID, environmentHosts) + returnedHosts, returnedHostDependencies, err := createHosts(txClient, ctx, log, loadedConfig.Hosts, cEnviroment.HCLID, environmentHosts) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in Hosts into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in Hosts into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } returnedHostIDs := getHostIDs(returnedHosts) - returnedIncludedNetworks, err := createIncludedNetwork(txClient, ctx, log, cEnviroment.HCLEnvironmentToIncludedNetwork, cEnviroment.HclID, returnedHostIDs, returnedNetworkIDs) + returnedIncludedNetworks, err := createIncludedNetwork(txClient, ctx, log, cEnviroment.HCLEnvironmentToIncludedNetwork, cEnviroment.HCLID, returnedHostIDs, returnedNetworkIDs) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Error loading in included_networks into env: %v, Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Error loading in included_networks into env: %v, Err: %v", cEnviroment.HCLID, err) return nil, err } entEnvironment, err := txClient.Environment. Query(). - Where(environment.HclIDEQ(cEnviroment.HclID)). + Where(environment.HCLIDEQ(cEnviroment.HCLID)). Only(ctx) if err != nil { if err == err.(*ent.NotFoundError) { newEnvironment, err := txClient.Environment.Create(). - SetHclID(cEnviroment.HclID). + SetHCLID(cEnviroment.HCLID). SetAdminCidrs(cEnviroment.AdminCidrs). SetBuilder(cEnviroment.Builder). SetCompetitionID(cEnviroment.CompetitionID). @@ -531,13 +532,13 @@ func createEnviroments(ctx context.Context, client *ent.Client, log *logging.Log Save(ctx) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Failed to Create Environment %v. Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Failed to Create Environment %v. 
Err: %v", cEnviroment.HCLID, err) return nil, err } - _, err = validateHostDependencies(txClient, ctx, log, returnedHostDependencies, cEnviroment.HclID) + _, err = validateHostDependencies(txClient, ctx, log, returnedHostDependencies, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Failed to Validate Host Dependencies in Environment %v. Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Failed to Validate Host Dependencies in Environment %v. Err: %v", cEnviroment.HCLID, err) return nil, err } returnedEnvironment = append(returnedEnvironment, newEnvironment) @@ -545,7 +546,7 @@ func createEnviroments(ctx context.Context, client *ent.Client, log *logging.Log } } entEnvironment, err = entEnvironment.Update(). - SetHclID(cEnviroment.HclID). + SetHCLID(cEnviroment.HCLID). SetAdminCidrs(cEnviroment.AdminCidrs). SetBuilder(cEnviroment.Builder). SetCompetitionID(cEnviroment.CompetitionID). @@ -574,7 +575,7 @@ func createEnviroments(ctx context.Context, client *ent.Client, log *logging.Log Save(ctx) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Failed to Update Environment %v. Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Failed to Update Environment %v. Err: %v", cEnviroment.HCLID, err) return nil, err } entEnvironment, err = entEnvironment.Update(). @@ -596,13 +597,13 @@ func createEnviroments(ctx context.Context, client *ent.Client, log *logging.Log Save(ctx) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Failed to Update Environment %v with it's edges. Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Failed to Update Environment %v with it's edges. Err: %v", cEnviroment.HCLID, err) return nil, err } - _, err = validateHostDependencies(txClient, ctx, log, returnedHostDependencies, cEnviroment.HclID) + _, err = validateHostDependencies(txClient, ctx, log, returnedHostDependencies, cEnviroment.HCLID) if err != nil { err = rollback(txClient, err) - log.Log.Errorf("Failed to Validate Host Dependencies in Environment %v. Err: %v", cEnviroment.HclID, err) + log.Log.Errorf("Failed to Validate Host Dependencies in Environment %v. Err: %v", cEnviroment.HCLID, err) return nil, err } returnedEnvironment = append(returnedEnvironment, entEnvironment) @@ -617,13 +618,13 @@ func createEnviroments(ctx context.Context, client *ent.Client, log *logging.Log return returnedEnvironment, nil } -func createCompetitions(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configCompetitions map[string]*ent.Competition, envHclID string) ([]*ent.Competition, []*ent.DNS, error) { +func createCompetitions(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configCompetitions map[string]*ent.Competition, envHCLID string) ([]*ent.Competition, []*ent.DNS, error) { bulk := []*ent.CompetitionCreate{} returnedCompetitions := []*ent.Competition{} returnedAllDNS := []*ent.DNS{} for _, cCompetition := range configCompetitions { - log.Log.Debugf("Creating Competition: %v for Env: %v", cCompetition.HclID, envHclID) - returnedDNS, err := createDNS(txClient, ctx, log, cCompetition.HCLCompetitionToDNS, envHclID) + log.Log.Debugf("Creating Competition: %v for Env: %v", cCompetition.HCLID, envHCLID) + returnedDNS, err := createDNS(txClient, ctx, log, cCompetition.HCLCompetitionToDNS, envHCLID) if err != nil { return nil, nil, err } @@ -631,8 +632,8 @@ func createCompetitions(txClient *ent.Tx, ctx context.Context, log *logging.Logg Query(). 
Where( competition.And( - competition.HclIDEQ(cCompetition.HclID), - competition.HasCompetitionToEnvironmentWith(environment.HclIDEQ(envHclID)), + competition.HCLIDEQ(cCompetition.HCLID), + competition.HasCompetitionToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -640,7 +641,7 @@ func createCompetitions(txClient *ent.Tx, ctx context.Context, log *logging.Logg if err == err.(*ent.NotFoundError) { createdQuery := txClient.Competition.Create(). SetConfig(cCompetition.Config). - SetHclID(cCompetition.HclID). + SetHCLID(cCompetition.HCLID). SetRootPassword(cCompetition.RootPassword). SetTags(cCompetition.Tags). AddCompetitionToDNS(returnedDNS...) @@ -650,18 +651,18 @@ func createCompetitions(txClient *ent.Tx, ctx context.Context, log *logging.Logg } entCompetition, err = entCompetition.Update(). SetConfig(cCompetition.Config). - SetHclID(cCompetition.HclID). + SetHCLID(cCompetition.HCLID). SetRootPassword(cCompetition.RootPassword). SetTags(cCompetition.Tags). ClearCompetitionToDNS(). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Competition %v. Err: %v", cCompetition.HclID, err) + log.Log.Errorf("Failed to Update Competition %v. Err: %v", cCompetition.HCLID, err) return nil, nil, err } _, err = entCompetition.Update().AddCompetitionToDNS(returnedDNS...).Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Competition %v with DNS. Err: %v", cCompetition.HclID, err) + log.Log.Errorf("Failed to Update Competition %v with DNS. Err: %v", cCompetition.HCLID, err) return nil, nil, err } returnedAllDNS = append(returnedAllDNS, returnedDNS...) @@ -694,18 +695,18 @@ func removeDuplicateValues(stringSlice []string) []string { return list } -func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configHosts map[string]*ent.Host, envHclID string, environmentHosts []string) ([]*ent.Host, []*ent.HostDependency, error) { +func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configHosts map[string]*ent.Host, envHCLID string, environmentHosts []string) ([]*ent.Host, []*ent.HostDependency, error) { returnedHosts := []*ent.Host{} returnedAllHostDependencies := []*ent.HostDependency{} environmentHosts = removeDuplicateValues(environmentHosts) for _, cHostID := range environmentHosts { - log.Log.Debugf("Creating Host: %v for Env: %v", cHostID, envHclID) + log.Log.Debugf("Creating Host: %v for Env: %v", cHostID, envHCLID) cHost, ok := configHosts[cHostID] if !ok { - log.Log.Errorf("Host %v was not defined in the Enviroment %v", cHostID, envHclID) - return nil, nil, fmt.Errorf("err: Host %v was not defined in the Enviroment %v", cHostID, envHclID) + log.Log.Errorf("Host %v was not defined in the Enviroment %v", cHostID, envHCLID) + return nil, nil, fmt.Errorf("err: Host %v was not defined in the Enviroment %v", cHostID, envHCLID) } - returnedDisk, err := createDisk(txClient, ctx, log, cHost.HCLHostToDisk, cHost.HclID, envHclID) + returnedDisk, err := createDisk(txClient, ctx, log, cHost.HCLHostToDisk, cHost.HCLID, envHCLID) if err != nil { return nil, nil, err } @@ -713,8 +714,8 @@ func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, con Query(). Where( host.And( - host.HclIDEQ(cHost.HclID), - host.HasHostToEnvironmentWith(environment.HclIDEQ(envHclID)), + host.HCLIDEQ(cHost.HCLID), + host.HasHostToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -725,7 +726,7 @@ func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, con SetDescription(cHost.Description). 
SetExposedTCPPorts(cHost.ExposedTCPPorts). SetExposedUDPPorts(cHost.ExposedUDPPorts). - SetHclID(cHost.HclID). + SetHCLID(cHost.HCLID). SetHostname(cHost.Hostname). SetInstanceSize(cHost.InstanceSize). SetLastOctet(cHost.LastOctet). @@ -738,7 +739,7 @@ func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, con SetHostToDisk(returnedDisk). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Create Host %v. Err: %v", cHost.HclID, err) + log.Log.Errorf("Failed to Create Host %v. Err: %v", cHost.HCLID, err) return nil, nil, err } } else { @@ -750,7 +751,7 @@ func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, con SetDescription(cHost.Description). SetExposedTCPPorts(cHost.ExposedTCPPorts). SetExposedUDPPorts(cHost.ExposedUDPPorts). - SetHclID(cHost.HclID). + SetHCLID(cHost.HCLID). SetHostname(cHost.Hostname). SetInstanceSize(cHost.InstanceSize). SetLastOctet(cHost.LastOctet). @@ -763,18 +764,18 @@ func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, con ClearHostToDisk(). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Host %v. Err: %v", cHost.HclID, err) + log.Log.Errorf("Failed to Update Host %v. Err: %v", cHost.HCLID, err) return nil, nil, err } _, err = entHost.Update().SetHostToDisk(returnedDisk).Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Disk to Host %v. Err: %v", cHost.HclID, err) + log.Log.Errorf("Failed to Update Disk to Host %v. Err: %v", cHost.HCLID, err) return nil, nil, err } } returnedHosts = append(returnedHosts, entHost) - returnedHostDependencies, err := createHostDependencies(txClient, ctx, log, cHost.HCLDependOnHostToHostDependency, envHclID, entHost) + returnedHostDependencies, err := createHostDependencies(txClient, ctx, log, cHost.HCLDependOnHostToHostDependency, envHCLID, entHost) if err != nil { return nil, nil, err } @@ -783,14 +784,14 @@ func createHosts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, con return returnedHosts, returnedAllHostDependencies, nil } -func createNetworks(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configNetworks map[string]*ent.Network, configIncludedNetworks []*ent.IncludedNetwork, envHclID string) ([]*ent.Network, error) { +func createNetworks(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configNetworks map[string]*ent.Network, configIncludedNetworks []*ent.IncludedNetwork, envHCLID string) ([]*ent.Network, error) { bulk := []*ent.NetworkCreate{} returnedNetworks := []*ent.Network{} for _, cNetwork := range configNetworks { - log.Log.Debugf("Creating Network: %v for Env: %v", cNetwork.HclID, envHclID) + log.Log.Debugf("Creating Network: %v for Env: %v", cNetwork.HCLID, envHCLID) included := false for _, cIncludedNetwork := range configIncludedNetworks { - if cIncludedNetwork.Name == cNetwork.HclID { + if cIncludedNetwork.Name == cNetwork.HCLID { included = true break } @@ -802,8 +803,8 @@ func createNetworks(txClient *ent.Tx, ctx context.Context, log *logging.Logger, Query(). Where( network.And( - network.HclIDEQ(cNetwork.HclID), - network.HasNetworkToEnvironmentWith(environment.HclIDEQ(envHclID)), + network.HCLIDEQ(cNetwork.HCLID), + network.HasNetworkToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -811,7 +812,7 @@ func createNetworks(txClient *ent.Tx, ctx context.Context, log *logging.Logger, if err == err.(*ent.NotFoundError) { createdQuery := txClient.Network.Create(). SetCidr(cNetwork.Cidr). - SetHclID(cNetwork.HclID). + SetHCLID(cNetwork.HCLID). SetName(cNetwork.Name). 
SetTags(cNetwork.Tags). SetVars(cNetwork.Vars). @@ -822,14 +823,14 @@ func createNetworks(txClient *ent.Tx, ctx context.Context, log *logging.Logger, } entNetwork, err = entNetwork.Update(). SetCidr(cNetwork.Cidr). - SetHclID(cNetwork.HclID). + SetHCLID(cNetwork.HCLID). SetName(cNetwork.Name). SetTags(cNetwork.Tags). SetVars(cNetwork.Vars). SetVdiVisible(cNetwork.VdiVisible). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Network %v. Err: %v", cNetwork.HclID, err) + log.Log.Errorf("Failed to Update Network %v. Err: %v", cNetwork.HCLID, err) return nil, err } returnedNetworks = append(returnedNetworks, entNetwork) @@ -845,13 +846,13 @@ func createNetworks(txClient *ent.Tx, ctx context.Context, log *logging.Logger, return returnedNetworks, nil } -func createScripts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configScript map[string]*ent.Script, envHclID string) ([]*ent.Script, []*ent.Finding, error) { +func createScripts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configScript map[string]*ent.Script, envHCLID string) ([]*ent.Script, []*ent.Finding, error) { bulk := []*ent.ScriptCreate{} returnedScripts := []*ent.Script{} returnedAllFindings := []*ent.Finding{} for _, cScript := range configScript { - log.Log.Debugf("Creating Script: %v for Env: %v", cScript.HclID, envHclID) - returnedFindings, err := createFindings(txClient, ctx, log, cScript.HCLScriptToFinding, envHclID, cScript.HclID) + log.Log.Debugf("Creating Script: %v for Env: %v", cScript.HCLID, envHCLID) + returnedFindings, err := createFindings(txClient, ctx, log, cScript.HCLScriptToFinding, envHCLID, cScript.HCLID) if err != nil { return nil, nil, err } @@ -859,15 +860,15 @@ func createScripts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c Query(). Where( script.And( - script.HclIDEQ(cScript.HclID), - script.HasScriptToEnvironmentWith(environment.HclIDEQ(envHclID)), + script.HCLIDEQ(cScript.HCLID), + script.HasScriptToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) if err != nil { if err == err.(*ent.NotFoundError) { createdQuery := txClient.Script.Create(). - SetHclID(cScript.HclID). + SetHCLID(cScript.HCLID). SetName(cScript.Name). SetLanguage(cScript.Language). SetDescription(cScript.Description). @@ -887,7 +888,7 @@ func createScripts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c } } entScript, err = entScript.Update(). - SetHclID(cScript.HclID). + SetHCLID(cScript.HCLID). SetName(cScript.Name). SetLanguage(cScript.Language). SetDescription(cScript.Description). @@ -904,12 +905,12 @@ func createScripts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c ClearScriptToFinding(). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Script %v. Err: %v", cScript.HclID, err) + log.Log.Errorf("Failed to Update Script %v. Err: %v", cScript.HCLID, err) return nil, nil, err } _, err = entScript.Update().AddScriptToFinding(returnedFindings...).Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Script %v with it's Findings. Err: %v", cScript.HclID, err) + log.Log.Errorf("Failed to Update Script %v with it's Findings. Err: %v", cScript.HCLID, err) return nil, nil, err } returnedAllFindings = append(returnedAllFindings, returnedFindings...) 
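Reviewer note: every create*() loader touched above follows the same upsert shape: query the entity by its HCL ID scoped to the target environment, create it when ent reports not-found, otherwise update it in place. The sketch below shows that shape in isolation, using the Script entity as the example. It is illustrative only: upsertScript is not a function in this repository, the package/import layout is assumed, and it saves immediately instead of queuing a bulk create the way the real loaders do.

package loader // assumed placement, for illustration only

import (
	"context"

	"github.com/gen0cide/laforge/ent"
	"github.com/gen0cide/laforge/ent/environment"
	"github.com/gen0cide/laforge/ent/script"
)

// upsertScript queries a script by HCL ID within one environment, creating it
// if missing and refreshing its fields otherwise.
func upsertScript(ctx context.Context, tx *ent.Tx, c *ent.Script, envHCLID string) (*ent.Script, error) {
	existing, err := tx.Script.Query().
		Where(
			script.HCLIDEQ(c.HCLID),
			script.HasScriptToEnvironmentWith(environment.HCLIDEQ(envHCLID)),
		).
		Only(ctx)
	switch {
	case ent.IsNotFound(err):
		// Not present yet: create it. (The loaders above queue a ScriptCreate in a
		// bulk slice and save the batch later; saving here keeps the sketch short.)
		return tx.Script.Create().
			SetHCLID(c.HCLID).
			SetName(c.Name).
			SetLanguage(c.Language).
			SetDescription(c.Description).
			Save(ctx)
	case err != nil:
		// Any other query error aborts the load.
		return nil, err
	default:
		// Already present: update it in place.
		return existing.Update().
			SetHCLID(c.HCLID).
			SetName(c.Name).
			SetLanguage(c.Language).
			SetDescription(c.Description).
			Save(ctx)
	}
}

ent.IsNotFound(err) is the helper ent generates for this check; the err == err.(*ent.NotFoundError) comparison used in the surrounding code will panic if the error has any other concrete type, so the generated helper is the safer form of the same test.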
@@ -926,17 +927,17 @@ func createScripts(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c return returnedScripts, returnedAllFindings, nil } -func createAnsible(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configAnsible map[string]*ent.Ansible, envHclID string) ([]*ent.Ansible, error) { +func createAnsible(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configAnsible map[string]*ent.Ansible, envHCLID string) ([]*ent.Ansible, error) { bulk := []*ent.AnsibleCreate{} returnedAnsible := []*ent.Ansible{} for _, cAnsible := range configAnsible { - log.Log.Debugf("Creating Ansible: %v for Env: %v", cAnsible.HclID, envHclID) + log.Log.Debugf("Creating Ansible: %v for Env: %v", cAnsible.HCLID, envHCLID) entAnsible, err := txClient.Ansible. Query(). Where( ansible.And( - ansible.HclIDEQ(cAnsible.HclID), - ansible.HasAnsibleFromEnvironmentWith(environment.HclIDEQ(envHclID)), + ansible.HCLIDEQ(cAnsible.HCLID), + ansible.HasAnsibleFromEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -944,7 +945,7 @@ func createAnsible(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c if err == err.(*ent.NotFoundError) { createdQuery := txClient.Ansible.Create(). SetName(cAnsible.Name). - SetHclID(cAnsible.HclID). + SetHCLID(cAnsible.HCLID). SetDescription(cAnsible.Description). SetSource(cAnsible.Source). SetPlaybookName(cAnsible.PlaybookName). @@ -958,7 +959,7 @@ func createAnsible(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c } entAnsible, err = entAnsible.Update(). SetName(cAnsible.Name). - SetHclID(cAnsible.HclID). + SetHCLID(cAnsible.HCLID). SetDescription(cAnsible.Description). SetSource(cAnsible.Source). SetPlaybookName(cAnsible.PlaybookName). @@ -968,7 +969,7 @@ func createAnsible(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c SetAbsPath(cAnsible.AbsPath). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Ansible %v. Err: %v", cAnsible.HclID, err) + log.Log.Errorf("Failed to Update Ansible %v. Err: %v", cAnsible.HCLID, err) return nil, err } returnedAnsible = append(returnedAnsible, entAnsible) @@ -984,17 +985,17 @@ func createAnsible(txClient *ent.Tx, ctx context.Context, log *logging.Logger, c return returnedAnsible, nil } -func createCommands(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configCommands map[string]*ent.Command, envHclID string) ([]*ent.Command, error) { +func createCommands(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configCommands map[string]*ent.Command, envHCLID string) ([]*ent.Command, error) { bulk := []*ent.CommandCreate{} returnedCommands := []*ent.Command{} for _, cCommand := range configCommands { - log.Log.Debugf("Creating Command: %v for Env: %v", cCommand.HclID, envHclID) + log.Log.Debugf("Creating Command: %v for Env: %v", cCommand.HCLID, envHCLID) entCommand, err := txClient.Command. Query(). Where( command.And( - command.HclIDEQ(cCommand.HclID), - command.HasCommandToEnvironmentWith(environment.HclIDEQ(envHclID)), + command.HCLIDEQ(cCommand.HCLID), + command.HasCommandToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -1005,7 +1006,7 @@ func createCommands(txClient *ent.Tx, ctx context.Context, log *logging.Logger, SetCooldown(cCommand.Cooldown). SetDescription(cCommand.Description). SetDisabled(cCommand.Disabled). - SetHclID(cCommand.HclID). + SetHCLID(cCommand.HCLID). SetIgnoreErrors(cCommand.IgnoreErrors). SetName(cCommand.Name). SetProgram(cCommand.Program). 
@@ -1021,7 +1022,7 @@ func createCommands(txClient *ent.Tx, ctx context.Context, log *logging.Logger, SetCooldown(cCommand.Cooldown). SetDescription(cCommand.Description). SetDisabled(cCommand.Disabled). - SetHclID(cCommand.HclID). + SetHCLID(cCommand.HCLID). SetIgnoreErrors(cCommand.IgnoreErrors). SetName(cCommand.Name). SetProgram(cCommand.Program). @@ -1030,7 +1031,7 @@ func createCommands(txClient *ent.Tx, ctx context.Context, log *logging.Logger, SetVars(cCommand.Vars). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Command %v. Err: %v", cCommand.HclID, err) + log.Log.Errorf("Failed to Update Command %v. Err: %v", cCommand.HCLID, err) return nil, err } returnedCommands = append(returnedCommands, entCommand) @@ -1046,18 +1047,18 @@ func createCommands(txClient *ent.Tx, ctx context.Context, log *logging.Logger, return returnedCommands, nil } -func createDNSRecords(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configDNSRecords map[string]*ent.DNSRecord, envHclID string) ([]*ent.DNSRecord, error) { +func createDNSRecords(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configDNSRecords map[string]*ent.DNSRecord, envHCLID string) ([]*ent.DNSRecord, error) { bulk := []*ent.DNSRecordCreate{} returnedDNSRecords := []*ent.DNSRecord{} for _, cDNSRecord := range configDNSRecords { - log.Log.Debugf("Creating DNSRecord: %v for Env: %v", cDNSRecord.HclID, envHclID) + log.Log.Debugf("Creating DNSRecord: %v for Env: %v", cDNSRecord.HCLID, envHCLID) entDNSRecord, err := txClient.DNSRecord. Query(). Where( dnsrecord.And( - dnsrecord.HclIDEQ(cDNSRecord.HclID), - dnsrecord.HasDNSRecordToEnvironmentWith(environment.HclIDEQ(envHclID)), + dnsrecord.HCLIDEQ(cDNSRecord.HCLID), + dnsrecord.HasDNSRecordToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -1065,7 +1066,7 @@ func createDNSRecords(txClient *ent.Tx, ctx context.Context, log *logging.Logger if err == err.(*ent.NotFoundError) { createdQuery := txClient.DNSRecord.Create(). SetDisabled(cDNSRecord.Disabled). - SetHclID(cDNSRecord.HclID). + SetHCLID(cDNSRecord.HCLID). SetName(cDNSRecord.Name). SetTags(cDNSRecord.Tags). SetType(cDNSRecord.Type). @@ -1078,7 +1079,7 @@ func createDNSRecords(txClient *ent.Tx, ctx context.Context, log *logging.Logger } entDNSRecord, err = entDNSRecord.Update(). SetDisabled(cDNSRecord.Disabled). - SetHclID(cDNSRecord.HclID). + SetHCLID(cDNSRecord.HCLID). SetName(cDNSRecord.Name). SetTags(cDNSRecord.Tags). SetType(cDNSRecord.Type). @@ -1087,7 +1088,7 @@ func createDNSRecords(txClient *ent.Tx, ctx context.Context, log *logging.Logger SetZone(cDNSRecord.Zone). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update DNS Record %v. Err: %v", cDNSRecord.HclID, err) + log.Log.Errorf("Failed to Update DNS Record %v. 
Err: %v", cDNSRecord.HCLID, err) return nil, err } returnedDNSRecords = append(returnedDNSRecords, entDNSRecord) @@ -1103,25 +1104,25 @@ func createDNSRecords(txClient *ent.Tx, ctx context.Context, log *logging.Logger return returnedDNSRecords, nil } -func createFileDownload(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFileDownloads map[string]*ent.FileDownload, envHclID string) ([]*ent.FileDownload, error) { +func createFileDownload(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFileDownloads map[string]*ent.FileDownload, envHCLID string) ([]*ent.FileDownload, error) { bulk := []*ent.FileDownloadCreate{} returnedFileDownloads := []*ent.FileDownload{} for _, cFileDownload := range configFileDownloads { - log.Log.Debugf("Creating FileDownload: %v for Env: %v", cFileDownload.HclID, envHclID) + log.Log.Debugf("Creating FileDownload: %v for Env: %v", cFileDownload.HCLID, envHCLID) entFileDownload, err := txClient.FileDownload. Query(). Where( filedownload.And( - filedownload.HclIDEQ(cFileDownload.HclID), - filedownload.HasFileDownloadToEnvironmentWith(environment.HclIDEQ(envHclID)), + filedownload.HCLIDEQ(cFileDownload.HCLID), + filedownload.HasFileDownloadToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) if err != nil { if err == err.(*ent.NotFoundError) { createdQuery := txClient.FileDownload.Create(). - SetHclID(cFileDownload.HclID). + SetHCLID(cFileDownload.HCLID). SetSourceType(cFileDownload.SourceType). SetSource(cFileDownload.Source). SetDestination(cFileDownload.Destination). @@ -1137,7 +1138,7 @@ func createFileDownload(txClient *ent.Tx, ctx context.Context, log *logging.Logg } } entFileDownload, err = entFileDownload.Update(). - SetHclID(cFileDownload.HclID). + SetHCLID(cFileDownload.HCLID). SetSourceType(cFileDownload.SourceType). SetSource(cFileDownload.Source). SetDestination(cFileDownload.Destination). @@ -1150,7 +1151,7 @@ func createFileDownload(txClient *ent.Tx, ctx context.Context, log *logging.Logg SetIsTxt(cFileDownload.IsTxt). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update File Download %v. Err: %v", cFileDownload.HclID, err) + log.Log.Errorf("Failed to Update File Download %v. Err: %v", cFileDownload.HCLID, err) return nil, err } returnedFileDownloads = append(returnedFileDownloads, entFileDownload) @@ -1166,25 +1167,25 @@ func createFileDownload(txClient *ent.Tx, ctx context.Context, log *logging.Logg return returnedFileDownloads, nil } -func createFileDelete(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFileDeletes map[string]*ent.FileDelete, envHclID string) ([]*ent.FileDelete, error) { +func createFileDelete(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFileDeletes map[string]*ent.FileDelete, envHCLID string) ([]*ent.FileDelete, error) { bulk := []*ent.FileDeleteCreate{} returnedFileDeletes := []*ent.FileDelete{} for _, cFileDelete := range configFileDeletes { - log.Log.Debugf("Creating FileDelete: %v for Env: %v", cFileDelete.HclID, envHclID) + log.Log.Debugf("Creating FileDelete: %v for Env: %v", cFileDelete.HCLID, envHCLID) entFileDelete, err := txClient.FileDelete. Query(). Where( filedelete.And( - filedelete.HclIDEQ(cFileDelete.HclID), - filedelete.HasFileDeleteToEnvironmentWith(environment.HclIDEQ(envHclID)), + filedelete.HCLIDEQ(cFileDelete.HCLID), + filedelete.HasFileDeleteToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) if err != nil { if err == err.(*ent.NotFoundError) { createdQuery := txClient.FileDelete.Create(). 
- SetHclID(cFileDelete.HclID). + SetHCLID(cFileDelete.HCLID). SetPath(cFileDelete.Path). SetTags(cFileDelete.Tags) bulk = append(bulk, createdQuery) @@ -1192,12 +1193,12 @@ func createFileDelete(txClient *ent.Tx, ctx context.Context, log *logging.Logger } } entFileDelete, err = entFileDelete.Update(). - SetHclID(cFileDelete.HclID). + SetHCLID(cFileDelete.HCLID). SetPath(cFileDelete.Path). SetTags(cFileDelete.Tags). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update File Delete %v. Err: %v", cFileDelete.HclID, err) + log.Log.Errorf("Failed to Update File Delete %v. Err: %v", cFileDelete.HCLID, err) return nil, err } returnedFileDeletes = append(returnedFileDeletes, entFileDelete) @@ -1213,18 +1214,18 @@ func createFileDelete(txClient *ent.Tx, ctx context.Context, log *logging.Logger return returnedFileDeletes, nil } -func createFileExtract(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFileExtracts map[string]*ent.FileExtract, envHclID string) ([]*ent.FileExtract, error) { +func createFileExtract(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFileExtracts map[string]*ent.FileExtract, envHCLID string) ([]*ent.FileExtract, error) { bulk := []*ent.FileExtractCreate{} returnedFileExtracts := []*ent.FileExtract{} for _, cFileExtract := range configFileExtracts { - log.Log.Debugf("Creating FileExtract: %v for Env: %v", cFileExtract.HclID, envHclID) + log.Log.Debugf("Creating FileExtract: %v for Env: %v", cFileExtract.HCLID, envHCLID) entFileExtract, err := txClient.FileExtract. Query(). Where( fileextract.And( - fileextract.HclIDEQ(cFileExtract.HclID), - fileextract.HasFileExtractToEnvironmentWith(environment.HclIDEQ(envHclID)), + fileextract.HCLIDEQ(cFileExtract.HCLID), + fileextract.HasFileExtractToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -1232,7 +1233,7 @@ func createFileExtract(txClient *ent.Tx, ctx context.Context, log *logging.Logge if err == err.(*ent.NotFoundError) { createdQuery := txClient.FileExtract.Create(). SetDestination(cFileExtract.Destination). - SetHclID(cFileExtract.HclID). + SetHCLID(cFileExtract.HCLID). SetSource(cFileExtract.Source). SetTags(cFileExtract.Tags). SetType(cFileExtract.Type) @@ -1242,13 +1243,13 @@ func createFileExtract(txClient *ent.Tx, ctx context.Context, log *logging.Logge } entFileExtract, err = entFileExtract.Update(). SetDestination(cFileExtract.Destination). - SetHclID(cFileExtract.HclID). + SetHCLID(cFileExtract.HCLID). SetSource(cFileExtract.Source). SetTags(cFileExtract.Tags). SetType(cFileExtract.Type). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update File Extract %v. Err: %v", cFileExtract.HclID, err) + log.Log.Errorf("Failed to Update File Extract %v. 
Err: %v", cFileExtract.HCLID, err) return nil, err } returnedFileExtracts = append(returnedFileExtracts, entFileExtract) @@ -1264,18 +1265,18 @@ func createFileExtract(txClient *ent.Tx, ctx context.Context, log *logging.Logge return returnedFileExtracts, nil } -func createIdentities(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configIdentities map[string]*ent.Identity, envHclID string) ([]*ent.Identity, error) { +func createIdentities(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configIdentities map[string]*ent.Identity, envHCLID string) ([]*ent.Identity, error) { bulk := []*ent.IdentityCreate{} returnedIdentities := []*ent.Identity{} for _, cIdentity := range configIdentities { - log.Log.Debugf("Creating Identity: %v for Env: %v", cIdentity.HclID, envHclID) + log.Log.Debugf("Creating Identity: %v for Env: %v", cIdentity.HCLID, envHCLID) entIdentity, err := txClient.Identity. Query(). Where( identity.And( - identity.HclIDEQ(cIdentity.HclID), - identity.HasIdentityToEnvironmentWith(environment.HclIDEQ(envHclID)), + identity.HCLIDEQ(cIdentity.HCLID), + identity.HasIdentityToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -1286,7 +1287,7 @@ func createIdentities(txClient *ent.Tx, ctx context.Context, log *logging.Logger SetDescription(cIdentity.Description). SetEmail(cIdentity.Email). SetFirstName(cIdentity.FirstName). - SetHclID(cIdentity.HclID). + SetHCLID(cIdentity.HCLID). SetLastName(cIdentity.LastName). SetPassword(cIdentity.Password). SetVars(cIdentity.Vars). @@ -1300,14 +1301,14 @@ func createIdentities(txClient *ent.Tx, ctx context.Context, log *logging.Logger SetDescription(cIdentity.Description). SetEmail(cIdentity.Email). SetFirstName(cIdentity.FirstName). - SetHclID(cIdentity.HclID). + SetHCLID(cIdentity.HCLID). SetLastName(cIdentity.LastName). SetPassword(cIdentity.Password). SetVars(cIdentity.Vars). SetTags(cIdentity.Tags). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Identity %v. Err: %v", cIdentity.HclID, err) + log.Log.Errorf("Failed to Update Identity %v. Err: %v", cIdentity.HCLID, err) return nil, err } returnedIdentities = append(returnedIdentities, entIdentity) @@ -1323,19 +1324,19 @@ func createIdentities(txClient *ent.Tx, ctx context.Context, log *logging.Logger return returnedIdentities, nil } -func createFindings(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFindings []*ent.Finding, envHclID string, entScriptID string) ([]*ent.Finding, error) { +func createFindings(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configFindings []*ent.Finding, envHCLID string, entScriptID string) ([]*ent.Finding, error) { bulk := []*ent.FindingCreate{} returnedFindings := []*ent.Finding{} for _, cFinding := range configFindings { - log.Log.Debugf("Creating Finding: %v for Env: %v", cFinding.Name, envHclID) + log.Log.Debugf("Creating Finding: %v for Env: %v", cFinding.Name, envHCLID) entFinding, err := txClient.Finding. Query(). Where( finding.And( finding.Name(cFinding.Name), - finding.HasFindingToEnvironmentWith(environment.HclIDEQ(envHclID)), - finding.HasFindingToScriptWith(script.HclID(entScriptID)), + finding.HasFindingToEnvironmentWith(environment.HCLIDEQ(envHCLID)), + finding.HasFindingToScriptWith(script.HCLID(entScriptID)), ), ). Only(ctx) @@ -1359,7 +1360,7 @@ func createFindings(txClient *ent.Tx, ctx context.Context, log *logging.Logger, SetTags(cFinding.Tags). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Finding %v for Script %v in Enviroment %v. 
Err: %v", cFinding.Name, entScriptID, envHclID, err) + log.Log.Errorf("Failed to Update Finding %v for Script %v in Enviroment %v. Err: %v", cFinding.Name, entScriptID, envHCLID, err) return nil, err } returnedFindings = append(returnedFindings, entFinding) @@ -1375,20 +1376,20 @@ func createFindings(txClient *ent.Tx, ctx context.Context, log *logging.Logger, return returnedFindings, nil } -func createHostDependencies(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configHostDependencies []*ent.HostDependency, envHclID string, dependByHost *ent.Host) ([]*ent.HostDependency, error) { +func createHostDependencies(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configHostDependencies []*ent.HostDependency, envHCLID string, dependByHost *ent.Host) ([]*ent.HostDependency, error) { bulk := []*ent.HostDependencyCreate{} returnedHostDependencies := []*ent.HostDependency{} for _, cHostDependency := range configHostDependencies { - log.Log.Debugf("Creating HostDependency for host %v to host %v in network %v for Env: %v", dependByHost.HclID, cHostDependency.HostID, cHostDependency.NetworkID, envHclID) + log.Log.Debugf("Creating HostDependency for host %v to host %v in network %v for Env: %v", dependByHost.HCLID, cHostDependency.HostID, cHostDependency.NetworkID, envHCLID) entHostDependency, err := txClient.HostDependency. Query(). Where( hostdependency.And( - hostdependency.HasHostDependencyToDependByHostWith(host.HclIDEQ(dependByHost.HclID)), + hostdependency.HasHostDependencyToDependByHostWith(host.HCLIDEQ(dependByHost.HCLID)), hostdependency.HostIDEQ(cHostDependency.HostID), hostdependency.NetworkIDEQ(cHostDependency.NetworkID), - hostdependency.HasHostDependencyToEnvironmentWith(environment.HclIDEQ(envHclID)), + hostdependency.HasHostDependencyToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -1408,14 +1409,14 @@ func createHostDependencies(txClient *ent.Tx, ctx context.Context, log *logging. ClearHostDependencyToNetwork(). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Clear Host Dependency by %v on Host %v Err: %v", dependByHost.HclID, cHostDependency.HostID, err) + log.Log.Errorf("Failed to Clear Host Dependency by %v on Host %v Err: %v", dependByHost.HCLID, cHostDependency.HostID, err) return nil, err } entHostDependency, err = entHostDependency.Update(). SetHostDependencyToDependByHost(dependByHost). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update Host Dependency by %v on Host %v Err: %v", dependByHost.HclID, cHostDependency.HostID, err) + log.Log.Errorf("Failed to Update Host Dependency by %v on Host %v Err: %v", dependByHost.HCLID, cHostDependency.HostID, err) return nil, err } returnedHostDependencies = append(returnedHostDependencies, entHostDependency) @@ -1431,17 +1432,17 @@ func createHostDependencies(txClient *ent.Tx, ctx context.Context, log *logging. return returnedHostDependencies, nil } -func createDNS(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configDNS []*ent.DNS, envHclID string) ([]*ent.DNS, error) { +func createDNS(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configDNS []*ent.DNS, envHCLID string) ([]*ent.DNS, error) { bulk := []*ent.DNSCreate{} returnedDNS := []*ent.DNS{} for _, cDNS := range configDNS { - log.Log.Debugf("Creating DNS: %v for Env: %v", cDNS.HclID, envHclID) + log.Log.Debugf("Creating DNS: %v for Env: %v", cDNS.HCLID, envHCLID) entDNS, err := txClient.DNS. Query(). 
Where( dns.And( - dns.HclIDEQ(cDNS.HclID), - dns.HasDNSToEnvironmentWith(environment.HclIDEQ(envHclID)), + dns.HCLIDEQ(cDNS.HCLID), + dns.HasDNSToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ). Only(ctx) @@ -1450,7 +1451,7 @@ func createDNS(txClient *ent.Tx, ctx context.Context, log *logging.Logger, confi createdQuery := txClient.DNS.Create(). SetConfig(cDNS.Config). SetDNSServers(cDNS.DNSServers). - SetHclID(cDNS.HclID). + SetHCLID(cDNS.HCLID). SetNtpServers(cDNS.NtpServers). SetRootDomain(cDNS.RootDomain). SetType(cDNS.Type) @@ -1461,13 +1462,13 @@ func createDNS(txClient *ent.Tx, ctx context.Context, log *logging.Logger, confi entDNS, err = entDNS.Update(). SetConfig(cDNS.Config). SetDNSServers(cDNS.DNSServers). - SetHclID(cDNS.HclID). + SetHCLID(cDNS.HCLID). SetNtpServers(cDNS.NtpServers). SetRootDomain(cDNS.RootDomain). SetType(cDNS.Type). Save(ctx) if err != nil { - log.Log.Errorf("Failed to Update DNS %v. Err: %v", cDNS.HclID, err) + log.Log.Errorf("Failed to Update DNS %v. Err: %v", cDNS.HCLID, err) return nil, err } returnedDNS = append(returnedDNS, entDNS) @@ -1483,15 +1484,15 @@ func createDNS(txClient *ent.Tx, ctx context.Context, log *logging.Logger, confi return returnedDNS, nil } -func createDisk(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configDisk *ent.Disk, hostHclID string, envHclID string) (*ent.Disk, error) { +func createDisk(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configDisk *ent.Disk, hostHCLID string, envHCLID string) (*ent.Disk, error) { entDisk, err := txClient.Disk. Query(). Where( disk.And( disk.HasDiskToHostWith( host.And( - host.HclIDEQ(hostHclID), - host.HasHostToEnvironmentWith(environment.HclIDEQ(envHclID)), + host.HCLIDEQ(hostHCLID), + host.HasHostToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ), ), @@ -1503,7 +1504,7 @@ func createDisk(txClient *ent.Tx, ctx context.Context, log *logging.Logger, conf SetSize(configDisk.Size). Save(ctx) if err != nil { - log.Log.Errorf("Failed to create Disk for Host %v. Err: %v", hostHclID, err) + log.Log.Errorf("Failed to create Disk for Host %v. Err: %v", hostHCLID, err) return nil, err } } @@ -1512,44 +1513,44 @@ func createDisk(txClient *ent.Tx, ctx context.Context, log *logging.Logger, conf SetSize(configDisk.Size). Save(ctx) if err != nil { - log.Log.Errorf("Failed to update Disk Size for Host %v. Err: %v", hostHclID, err) + log.Log.Errorf("Failed to update Disk Size for Host %v. 
Err: %v", hostHCLID, err) return nil, err } return entDisk, nil } -func createIncludedNetwork(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configIncludedNetworks []*ent.IncludedNetwork, envHclID string, returnedHostIDs, returnedNetworkIDS []uuid.UUID) ([]*ent.IncludedNetwork, error) { +func createIncludedNetwork(txClient *ent.Tx, ctx context.Context, log *logging.Logger, configIncludedNetworks []*ent.IncludedNetwork, envHCLID string, returnedHostIDs, returnedNetworkIDS []uuid.UUID) ([]*ent.IncludedNetwork, error) { bulk := []*ent.IncludedNetworkCreate{} returnedIncludedNetworks := []*ent.IncludedNetwork{} for _, cIncludedNetwork := range configIncludedNetworks { entNetwork, err := txClient.Network.Query().Where( network.And( - network.HclIDEQ(cIncludedNetwork.Name), + network.HCLIDEQ(cIncludedNetwork.Name), network.IDIn(returnedNetworkIDS...), // network.Or( // network.Not(network.HasNetworkToEnvironment()), - // network.HasNetworkToEnvironmentWith(environment.HclIDEQ(envHclID)), + // network.HasNetworkToEnvironmentWith(environment.HCLIDEQ(envHCLID)), // ), ), ).Only(ctx) if err != nil { - log.Log.Errorf("Unable to Query %v network in %v enviroment. Err: %v", cIncludedNetwork.Name, envHclID, err) + log.Log.Errorf("Unable to Query %v network in %v enviroment. Err: %v", cIncludedNetwork.Name, envHCLID, err) return nil, err } entHosts := []*ent.Host{} - for _, cHostHclID := range cIncludedNetwork.Hosts { + for _, cHostHCLID := range cIncludedNetwork.Hosts { entHost, err := txClient.Host.Query().Where( host.And( - host.HclIDEQ(cHostHclID), + host.HCLIDEQ(cHostHCLID), host.IDIn(returnedHostIDs...), // host.Or( // host.Not(host.HasHostToEnvironment()), - // host.HasHostToEnvironmentWith(environment.HclIDEQ(envHclID)), + // host.HasHostToEnvironmentWith(environment.HCLIDEQ(envHCLID)), // ), ), ).Only(ctx) if err != nil { - log.Log.Errorf("Unable to Query %v host in %v enviroment. Err: %v", cHostHclID, envHclID, err) + log.Log.Errorf("Unable to Query %v host in %v enviroment. Err: %v", cHostHCLID, envHCLID, err) return nil, err } entHosts = append(entHosts, entHost) @@ -1558,7 +1559,7 @@ func createIncludedNetwork(txClient *ent.Tx, ctx context.Context, log *logging.L Query(). Where( includednetwork.And( - includednetwork.HasIncludedNetworkToEnvironmentWith(environment.HclIDEQ(envHclID)), + includednetwork.HasIncludedNetworkToEnvironmentWith(environment.HCLIDEQ(envHCLID)), includednetwork.NameEQ(cIncludedNetwork.Name), ), ). @@ -1605,38 +1606,38 @@ func createIncludedNetwork(txClient *ent.Tx, ctx context.Context, log *logging.L return returnedIncludedNetworks, nil } -func validateHostDependencies(txClient *ent.Tx, ctx context.Context, log *logging.Logger, uncheckedHostDependencies []*ent.HostDependency, envHclID string) ([]*ent.HostDependency, error) { +func validateHostDependencies(txClient *ent.Tx, ctx context.Context, log *logging.Logger, uncheckedHostDependencies []*ent.HostDependency, envHCLID string) ([]*ent.HostDependency, error) { checkedHostDependencies := []*ent.HostDependency{} for _, uncheckedHostDependency := range uncheckedHostDependencies { entNetwork, err := txClient.Network.Query().Where( network.And( - network.HclIDEQ(uncheckedHostDependency.NetworkID), - network.HasNetworkToEnvironmentWith(environment.HclIDEQ(envHclID)), + network.HCLIDEQ(uncheckedHostDependency.NetworkID), + network.HasNetworkToEnvironmentWith(environment.HCLIDEQ(envHCLID)), ), ).Only(ctx) if err != nil { - log.Log.Errorf("Unable to Query %v network in %v enviroment. 
Err: %v", uncheckedHostDependency.NetworkID, envHclID, err) + log.Log.Errorf("Unable to Query %v network in %v enviroment. Err: %v", uncheckedHostDependency.NetworkID, envHCLID, err) return nil, err } entHost, err := txClient.Host.Query().Where( host.And( - host.HasHostToEnvironmentWith(environment.HclIDEQ(envHclID)), - host.HclIDEQ(uncheckedHostDependency.HostID), + host.HasHostToEnvironmentWith(environment.HCLIDEQ(envHCLID)), + host.HCLIDEQ(uncheckedHostDependency.HostID), ), ).Only(ctx) if err != nil { - log.Log.Errorf("Unable to Query %v host in %v enviroment. Err: %v", uncheckedHostDependency.HostID, envHclID, err) + log.Log.Errorf("Unable to Query %v host in %v enviroment. Err: %v", uncheckedHostDependency.HostID, envHCLID, err) return nil, err } _, err = txClient.IncludedNetwork.Query().Where( includednetwork.And( - includednetwork.HasIncludedNetworkToEnvironmentWith(environment.HclIDEQ(envHclID)), - includednetwork.HasIncludedNetworkToHostWith(host.HclIDEQ(uncheckedHostDependency.HostID)), - includednetwork.HasIncludedNetworkToNetworkWith(network.HclIDEQ(uncheckedHostDependency.NetworkID)), + includednetwork.HasIncludedNetworkToEnvironmentWith(environment.HCLIDEQ(envHCLID)), + includednetwork.HasIncludedNetworkToHostWith(host.HCLIDEQ(uncheckedHostDependency.HostID)), + includednetwork.HasIncludedNetworkToNetworkWith(network.HCLIDEQ(uncheckedHostDependency.NetworkID)), ), ).Only(ctx) if err != nil { - log.Log.Errorf("Unable to Verify %v host in %v network while loading %v enviroment. Err: %v", uncheckedHostDependency.HostID, uncheckedHostDependency.NetworkID, envHclID, err) + log.Log.Errorf("Unable to Verify %v host in %v network while loading %v enviroment. Err: %v", uncheckedHostDependency.HostID, uncheckedHostDependency.NetworkID, envHCLID, err) return nil, err } uncheckedHostDependency, err := uncheckedHostDependency.Update(). @@ -1649,7 +1650,7 @@ func validateHostDependencies(txClient *ent.Tx, ctx context.Context, log *loggin log.Log.Errorf("Unable to find the host depended by %v Err: %v", uncheckedHostDependency.HostID, queryErr) return nil, queryErr } - log.Log.Errorf("Failed to clear the Host dependency of %v which relies on %v host in %v network. Err: %v", dependedByHost.HclID, uncheckedHostDependency.HostID, uncheckedHostDependency.NetworkID, err) + log.Log.Errorf("Failed to clear the Host dependency of %v which relies on %v host in %v network. Err: %v", dependedByHost.HCLID, uncheckedHostDependency.HostID, uncheckedHostDependency.NetworkID, err) return nil, err } entHostDependency, err := uncheckedHostDependency.Update(). @@ -1662,7 +1663,7 @@ func validateHostDependencies(txClient *ent.Tx, ctx context.Context, log *loggin log.Log.Errorf("Unable to find the host depended by %v Err: %v", uncheckedHostDependency.HostID, queryErr) return nil, queryErr } - log.Log.Errorf("Failed to update the Host dependency of %v which relies on %v host in %v network. Err: %v", dependedByHost.HclID, uncheckedHostDependency.HostID, uncheckedHostDependency.NetworkID, err) + log.Log.Errorf("Failed to update the Host dependency of %v which relies on %v host in %v network. 
Err: %v", dependedByHost.HCLID, uncheckedHostDependency.HostID, uncheckedHostDependency.NetworkID, err) return nil, err } checkedHostDependencies = append(checkedHostDependencies, entHostDependency) diff --git a/planner/plan.go b/planner/plan.go index 0186db17..8f4ce357 100755 --- a/planner/plan.go +++ b/planner/plan.go @@ -125,9 +125,9 @@ func CreateBuild(ctx context.Context, client *ent.Client, rdb *redis.Client, laf } return nil, err } - entCompetition, err := entEnvironment.QueryEnvironmentToCompetition().Where(competition.HclIDEQ(entEnvironment.CompetitionID)).Only(ctx) + entCompetition, err := entEnvironment.QueryEnvironmentToCompetition().Where(competition.HCLIDEQ(entEnvironment.CompetitionID)).Only(ctx) if err != nil { - logger.Log.Errorf("Failed to Query Competition %v for Environment %v. Err: %v", len(entEnvironment.CompetitionID), entEnvironment.HclID, err) + logger.Log.Errorf("Failed to Query Competition %v for Environment %v. Err: %v", len(entEnvironment.CompetitionID), entEnvironment.HCLID, err) _, _, err = utils.FailServerTask(ctx, client, rdb, taskStatus, serverTask, err) if err != nil { return nil, fmt.Errorf("error failing server task: %v", err) @@ -136,7 +136,7 @@ func CreateBuild(ctx context.Context, client *ent.Client, rdb *redis.Client, laf } entRepoCommit, err := entEnvironment.QueryEnvironmentToRepository().QueryRepositoryToRepoCommit().Order(ent.Desc(repocommit.FieldRevision)).First(ctx) if err != nil { - logger.Log.Errorf("Failed to Query Repository from Environment %v. Err: %v", entEnvironment.HclID, err) + logger.Log.Errorf("Failed to Query Repository from Environment %v. Err: %v", entEnvironment.HCLID, err) _, _, err = utils.FailServerTask(ctx, client, rdb, taskStatus, serverTask, err) if err != nil { return nil, fmt.Errorf("error failing server task: %v", err) @@ -153,7 +153,7 @@ func CreateBuild(ctx context.Context, client *ent.Client, rdb *redis.Client, laf SetVars(map[string]string{}). Save(ctx) if err != nil { - logger.Log.Errorf("Failed to create Build %v for Environment %v. Err: %v", len(entEnvironment.Edges.EnvironmentToBuild), entEnvironment.HclID, err) + logger.Log.Errorf("Failed to create Build %v for Environment %v. Err: %v", len(entEnvironment.Edges.EnvironmentToBuild), entEnvironment.HCLID, err) _, _, err = utils.FailServerTask(ctx, client, rdb, taskStatus, serverTask, err) if err != nil { return nil, fmt.Errorf("error failing server task: %v", err) @@ -465,7 +465,7 @@ func createProvisionedHosts(ctx context.Context, client *ent.Client, laforgeConf ).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { - logger.Log.Errorf("Failed to Query Existing Host %v. Err: %v", entHost.HclID, err) + logger.Log.Errorf("Failed to Query Existing Host %v. Err: %v", entHost.HCLID, err) return nil, err } } else { @@ -503,7 +503,7 @@ func createProvisionedHosts(ctx context.Context, client *ent.Client, laforgeConf ).WithProvisionedHostToPlan().Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { - logger.Log.Errorf("Failed to Query Depended On Host %v for Host %v. Err: %v", entHostDependency.Edges.HostDependencyToDependOnHost.HclID, entHost.HclID, err) + logger.Log.Errorf("Failed to Query Depended On Host %v for Host %v. 
Err: %v", entHostDependency.Edges.HostDependencyToDependOnHost.HCLID, entHost.HCLID, err) return nil, err } else { dependOnPnetwork, err := client.ProvisionedNetwork.Query().Where( @@ -520,7 +520,7 @@ func createProvisionedHosts(ctx context.Context, client *ent.Client, laforgeConf ), ).Only(ctx) if err != nil { - logger.Log.Errorf("Failed to Query Provined Network %v for Depended On Host %v. Err: %v", entHostDependency.Edges.HostDependencyToNetwork.HclID, entHostDependency.Edges.HostDependencyToDependOnHost.HclID, err) + logger.Log.Errorf("Failed to Query Provined Network %v for Depended On Host %v. Err: %v", entHostDependency.Edges.HostDependencyToNetwork.HCLID, entHostDependency.Edges.HostDependencyToDependOnHost.HCLID, err) } dependOnPnetworkPlan, err := dependOnPnetwork.QueryProvisionedNetworkToPlan().Only(ctx) if err != nil { @@ -536,7 +536,7 @@ func createProvisionedHosts(ctx context.Context, client *ent.Client, laforgeConf } dependOnPlan, err := entDependsOnHost.QueryProvisionedHostToEndStepPlan().Only(ctx) if err != nil && err != err.(*ent.NotFoundError) { - logger.Log.Errorf("Failed to Query Depended On Host %v Plan for Host %v. Err: %v", entHostDependency.Edges.HostDependencyToDependOnHost.HclID, entHost.HclID, err) + logger.Log.Errorf("Failed to Query Depended On Host %v Plan for Host %v. Err: %v", entHostDependency.Edges.HostDependencyToDependOnHost.HCLID, entHost.HCLID, err) return nil, err } prevPlans = append(prevPlans, dependOnPlan) @@ -594,7 +594,7 @@ func createProvisionedHosts(ctx context.Context, client *ent.Client, laforgeConf Save(ctx) if err != nil { - logger.Log.Errorf("Failed to create Plan Node for Provisioned Host %v. Err: %v", entHost.HclID, err) + logger.Log.Errorf("Failed to create Plan Node for Provisioned Host %v. Err: %v", entHost.HCLID, err) return nil, err } @@ -642,7 +642,7 @@ func createProvisionedHosts(ctx context.Context, client *ent.Client, laforgeConf script.HasScriptToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - script.HclIDEQ(userDataScriptID), + script.HCLIDEQ(userDataScriptID), ), ).Only(ctx) if err != nil { @@ -716,7 +716,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log } logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -739,14 +739,14 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log script.HasScriptToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - script.HclIDEQ(hclID), + script.HCLIDEQ(hclID), ), ).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -760,13 +760,13 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log command.HasCommandToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - command.HclIDEQ(hclID), + command.HCLIDEQ(hclID), )).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -780,13 +780,13 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log 
filedownload.HasFileDownloadToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - filedownload.HclIDEQ(hclID), + filedownload.HCLIDEQ(hclID), )).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -800,13 +800,13 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log fileextract.HasFileExtractToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - fileextract.HclIDEQ(hclID), + fileextract.HCLIDEQ(hclID), )).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -820,13 +820,13 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log filedelete.HasFileDeleteToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - filedelete.HclIDEQ(hclID), + filedelete.HCLIDEQ(hclID), )).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -840,13 +840,13 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log dnsrecord.HasDNSRecordToEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - dnsrecord.HclIDEQ(hclID), + dnsrecord.HCLIDEQ(hclID), )).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -860,13 +860,13 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log ansible.HasAnsibleFromEnvironmentWith( environment.IDEQ(currentEnvironment.ID), ), - ansible.HclIDEQ(hclID), + ansible.HCLIDEQ(hclID), )).Only(ctx) if err != nil { if err != err.(*ent.NotFoundError) { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -877,7 +877,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log } else { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -897,7 +897,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -938,7 +938,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -959,7 +959,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if 
err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -981,7 +981,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -1003,7 +1003,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -1045,7 +1045,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -1067,7 +1067,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -1116,7 +1116,7 @@ func createProvisioningStep(ctx context.Context, client *ent.Client, logger *log if err != nil { logger.Log.WithFields(logrus.Fields{ "pHost": pHost.ID, - "pHost.HCLID": entHost.HclID, + "pHost.HCLID": entHost.HCLID, "pHost.SubnetIP": pHost.SubnetIP, "stepNumber": stepNumber, "prevPlan": prevPlan.ID, @@ -1288,25 +1288,25 @@ func renderAnsible(ctx context.Context, client *ent.Client, logger *logging.Logg ansibleFolder := path.Join(dirAbsPath, currentAnsible.Name) err = os.MkdirAll(ansibleFolder, 0755) if err != nil { - logger.Log.Errorf("Failed to create folder for ansible %v. Err: %v", currentAnsible.HclID, err) + logger.Log.Errorf("Failed to create folder for ansible %v. Err: %v", currentAnsible.HCLID, err) return "", err } data, err := yaml.Marshal(templateData) if err != nil { - logger.Log.Errorf("Failed to render vars file for ansible %v. Err: %v", currentAnsible.HclID, err) + logger.Log.Errorf("Failed to render vars file for ansible %v. Err: %v", currentAnsible.HCLID, err) return "", err } varFileName := path.Join(ansibleFolder, "laforge_vars.yml") err = ioutil.WriteFile(varFileName, data, 0755) if err != nil { - logger.Log.Errorf("Failed to create vars file for ansible %v. Err: %v", currentAnsible.HclID, err) + logger.Log.Errorf("Failed to create vars file for ansible %v. Err: %v", currentAnsible.HCLID, err) return "", err } err = CopyDir(currentAnsible.AbsPath, ansibleFolder) if err != nil { - logger.Log.Errorf("Failed to copy folder for ansible %v. Err: %v", currentAnsible.HclID, err) + logger.Log.Errorf("Failed to copy folder for ansible %v. 
Err: %v", currentAnsible.HCLID, err) return "", err } diff --git a/utils/deleter/delete.go b/utils/deleter/delete.go index b0ff8c80..c6ce4147 100755 --- a/utils/deleter/delete.go +++ b/utils/deleter/delete.go @@ -122,7 +122,7 @@ func main() { } println(deletedCount) - // entEnvironment, err := client.Environment.Query().Where(environment.HclIDEQ("/envs/jrwr-2021-regional-dev")).Only(ctx) + // entEnvironment, err := client.Environment.Query().Where(environment.HCLIDEQ("/envs/jrwr-2021-regional-dev")).Only(ctx) // if err != nil { // log.Fatalf("failed to get env: %v", err) // } @@ -143,7 +143,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete middleware: %v", err) // } - // logrus.Infof("Deleted %v amount of GinMiddleware for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of GinMiddleware for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.AgentStatus.Delete().Where( // agentstatus.HasAgentStatusToBuildWith( // build.IDEQ(entBuild.ID), @@ -152,7 +152,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete agentstatus: %v", err) // } - // logrus.Infof("Deleted %v amount of agentstatus for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of agentstatus for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.AgentTask.Delete().Where( // agenttask.HasAgentTaskToProvisionedHostWith( // provisionedhost.HasProvisionedHostToProvisionedNetworkWith( @@ -165,7 +165,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete agenttask: %v", err) // } - // logrus.Infof("Deleted %v amount of agenttask for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of agenttask for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.ProvisioningStep.Delete().Where( // provisioningstep.HasProvisioningStepToProvisionedHostWith( // provisionedhost.HasProvisionedHostToProvisionedNetworkWith( @@ -178,7 +178,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete pstep: %v", err) // } - // logrus.Infof("Deleted %v amount of pstep for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of pstep for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.ProvisionedHost.Delete().Where( // provisionedhost.HasProvisionedHostToProvisionedNetworkWith( // provisionednetwork.HasProvisionedNetworkToBuildWith( @@ -189,7 +189,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete phost: %v", err) // } - // logrus.Infof("Deleted %v amount of phost for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of phost for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.ProvisionedNetwork.Delete().Where( // provisionednetwork.HasProvisionedNetworkToBuildWith( // build.IDEQ(entBuild.ID), @@ -203,7 +203,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete ProvisionedNetwork: %v", err) // } - // logrus.Infof("Deleted %v amount of ProvisionedNetwork for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // 
logrus.Infof("Deleted %v amount of ProvisionedNetwork for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.BuildCommit.Delete().Where( // buildcommit.HasBuildCommitToBuildWith( // build.IDEQ(entBuild.ID), @@ -212,7 +212,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete BuildCommit: %v", err) // } - // logrus.Infof("Deleted %v amount of BuildCommit for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of BuildCommit for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.Plan.Delete().Where( // plan.HasPlanToBuildWith( // build.IDEQ(entBuild.ID), @@ -221,7 +221,7 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete plan: %v", err) // } - // logrus.Infof("Deleted %v amount of plan for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of plan for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.AdhocPlan.Delete().Where( // adhocplan.HasAdhocPlanToBuildWith( // build.IDEQ(entBuild.ID), @@ -230,14 +230,14 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete AdhocPlan: %v", err) // } - // logrus.Infof("Deleted %v amount of AdhocPlan for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted %v amount of AdhocPlan for Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // amountDeleted, err = client.Build.Delete().Where( // build.IDEQ(entBuild.ID), // ).Exec(ctx) // if err != nil { // logrus.Fatalf("failed to delete Build: %v", err) // } - // logrus.Infof("Deleted Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HclID) + // logrus.Infof("Deleted Build %v for env %v", amountDeleted, entBuild.Revision, entEnvironment.HCLID) // } // amountDeleted, err := client.Environment.Delete().Where( // environment.IDEQ(entEnvironment.ID), @@ -245,5 +245,5 @@ func main() { // if err != nil { // logrus.Fatalf("failed to delete Build: %v", err) // } - // logrus.Infof("Deleted Env %v", amountDeleted, entEnvironment.HclID) + // logrus.Infof("Deleted Env %v", amountDeleted, entEnvironment.HCLID) }