Remove custom PARALLEL config support from queries

Tobie Morgan Hitchcock 2019-01-22 20:33:15 +00:00
parent efeb186200
commit 62f02015ba
14 changed files with 1705 additions and 1988 deletions
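Taken together, the change removes the per-statement parallelism hint end to end: the Parallel field disappears from every statement struct in the AST, the statement parsers stop reading it, the executor and document code stop forcing Parallel: 1 on internal statements, and the iterator always runs a fixed pool of workerCount goroutines (now runtime.NumCPU()) instead of sizing the pool per query. Below is a minimal, self-contained sketch of the fixed-size worker-pool shape the iterator converges on; the job/done types and the per-job work are simplified stand-ins for the real workable/doneable/newDocument machinery, not code from this repository.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// Simplified stand-ins for the iterator's workable and doneable types.
type job struct{ id int }
type done struct{ res string }

func main() {

	// The pool size is now always runtime.NumCPU(); it was runtime.NumCPU()*2
	// and could previously be narrowed per query via the PARALLEL setting.
	workerCount := runtime.NumCPU()

	// Channel buffers are sized to the pool, mirroring i.jobs and i.vals.
	jobs := make(chan job, workerCount)
	vals := make(chan done, workerCount)

	var wg sync.WaitGroup

	// A single collector goroutine receives every result, mirroring i.receive(...).
	go func() {
		for v := range vals {
			fmt.Println(v.res)
			wg.Done()
		}
	}()

	// A fixed pool of workers; no statement-level override remains.
	for w := 1; w <= workerCount; w++ {
		go func() {
			for j := range jobs {
				vals <- done{res: fmt.Sprintf("processed job %d", j.id)}
			}
		}()
	}

	// Submit work, mirroring i.deliver(...), then wait for all results.
	for n := 0; n < 10; n++ {
		wg.Add(1)
		jobs <- job{id: n}
	}
	wg.Wait()
	close(jobs)
}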

View file

@ -11,23 +11,23 @@ import (
const (
// ----- content types ----
codecSelferCcUTF84278 = 1
codecSelferCcRAW4278 = 0
codecSelferCcUTF89522 = 1
codecSelferCcRAW9522 = 0
// ----- value types used ----
codecSelferValueTypeArray4278 = 10
codecSelferValueTypeMap4278 = 9
codecSelferValueTypeString4278 = 6
codecSelferValueTypeInt4278 = 2
codecSelferValueTypeUint4278 = 3
codecSelferValueTypeFloat4278 = 4
codecSelferBitsize4278 = uint8(32 << (^uint(0) >> 63))
codecSelferValueTypeArray9522 = 10
codecSelferValueTypeMap9522 = 9
codecSelferValueTypeString9522 = 6
codecSelferValueTypeInt9522 = 2
codecSelferValueTypeUint9522 = 3
codecSelferValueTypeFloat9522 = 4
codecSelferBitsize9522 = uint8(32 << (^uint(0) >> 63))
)
var (
errCodecSelferOnlyMapOrArrayEncodeToStruct4278 = errors.New(`only encoded map or array can be decoded into a struct`)
errCodecSelferOnlyMapOrArrayEncodeToStruct9522 = errors.New(`only encoded map or array can be decoded into a struct`)
)
type codecSelfer4278 struct{}
type codecSelfer9522 struct{}
func init() {
if codec1978.GenVersion != 8 {
@ -39,7 +39,7 @@ func init() {
}
func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
@ -77,19 +77,19 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
if yyq2[0] {
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Time))
r.EncodeString(codecSelferCcUTF89522, string(x.Time))
}
} else {
r.EncodeString(codecSelferCcUTF84278, "")
r.EncodeString(codecSelferCcUTF89522, "")
}
} else {
if yyq2[0] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `time`)
r.EncodeString(codecSelferCcUTF89522, `time`)
r.WriteMapElemValue()
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Time))
r.EncodeString(codecSelferCcUTF89522, string(x.Time))
}
}
}
@ -98,19 +98,19 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
if yyq2[1] {
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Status))
r.EncodeString(codecSelferCcUTF89522, string(x.Status))
}
} else {
r.EncodeString(codecSelferCcUTF84278, "")
r.EncodeString(codecSelferCcUTF89522, "")
}
} else {
if yyq2[1] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `status`)
r.EncodeString(codecSelferCcUTF89522, `status`)
r.WriteMapElemValue()
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Status))
r.EncodeString(codecSelferCcUTF89522, string(x.Status))
}
}
}
@ -119,19 +119,19 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
if yyq2[2] {
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Detail))
r.EncodeString(codecSelferCcUTF89522, string(x.Detail))
}
} else {
r.EncodeString(codecSelferCcUTF84278, "")
r.EncodeString(codecSelferCcUTF89522, "")
}
} else {
if yyq2[2] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `detail`)
r.EncodeString(codecSelferCcUTF89522, `detail`)
r.WriteMapElemValue()
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Detail))
r.EncodeString(codecSelferCcUTF89522, string(x.Detail))
}
}
}
@ -152,7 +152,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
if yyq2[3] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `result`)
r.EncodeString(codecSelferCcUTF89522, `result`)
r.WriteMapElemValue()
if x.Result == nil {
r.EncodeNil()
@ -174,7 +174,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
if false {
@ -182,14 +182,14 @@ func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
z.DecExtension(x, yyxt1)
} else {
yyct2 := r.ContainerType()
if yyct2 == codecSelferValueTypeMap4278 {
if yyct2 == codecSelferValueTypeMap9522 {
yyl2 := r.ReadMapStart()
if yyl2 == 0 {
r.ReadMapEnd()
} else {
x.codecDecodeSelfFromMap(yyl2, d)
}
} else if yyct2 == codecSelferValueTypeArray4278 {
} else if yyct2 == codecSelferValueTypeArray9522 {
yyl2 := r.ReadArrayStart()
if yyl2 == 0 {
r.ReadArrayEnd()
@ -197,13 +197,13 @@ func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
x.codecDecodeSelfFromArray(yyl2, d)
}
} else {
panic(errCodecSelferOnlyMapOrArrayEncodeToStruct4278)
panic(errCodecSelferOnlyMapOrArrayEncodeToStruct9522)
}
}
}
func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyhl3 bool = l >= 0
@ -256,7 +256,7 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
}
func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj9 int
@ -346,7 +346,7 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
}
func (x *Dispatch) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
@ -383,19 +383,19 @@ func (x *Dispatch) CodecEncodeSelf(e *codec1978.Encoder) {
if yyq2[0] {
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Query))
r.EncodeString(codecSelferCcUTF89522, string(x.Query))
}
} else {
r.EncodeString(codecSelferCcUTF84278, "")
r.EncodeString(codecSelferCcUTF89522, "")
}
} else {
if yyq2[0] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `query`)
r.EncodeString(codecSelferCcUTF89522, `query`)
r.WriteMapElemValue()
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Query))
r.EncodeString(codecSelferCcUTF89522, string(x.Query))
}
}
}
@ -404,19 +404,19 @@ func (x *Dispatch) CodecEncodeSelf(e *codec1978.Encoder) {
if yyq2[1] {
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Action))
r.EncodeString(codecSelferCcUTF89522, string(x.Action))
}
} else {
r.EncodeString(codecSelferCcUTF84278, "")
r.EncodeString(codecSelferCcUTF89522, "")
}
} else {
if yyq2[1] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `action`)
r.EncodeString(codecSelferCcUTF89522, `action`)
r.WriteMapElemValue()
if false {
} else {
r.EncodeString(codecSelferCcUTF84278, string(x.Action))
r.EncodeString(codecSelferCcUTF89522, string(x.Action))
}
}
}
@ -437,7 +437,7 @@ func (x *Dispatch) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
if yyq2[2] {
r.WriteMapElemKey()
r.EncodeString(codecSelferCcUTF84278, `result`)
r.EncodeString(codecSelferCcUTF89522, `result`)
r.WriteMapElemValue()
if x.Result == nil {
r.EncodeNil()
@ -459,7 +459,7 @@ func (x *Dispatch) CodecEncodeSelf(e *codec1978.Encoder) {
}
func (x *Dispatch) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
if false {
@ -467,14 +467,14 @@ func (x *Dispatch) CodecDecodeSelf(d *codec1978.Decoder) {
z.DecExtension(x, yyxt1)
} else {
yyct2 := r.ContainerType()
if yyct2 == codecSelferValueTypeMap4278 {
if yyct2 == codecSelferValueTypeMap9522 {
yyl2 := r.ReadMapStart()
if yyl2 == 0 {
r.ReadMapEnd()
} else {
x.codecDecodeSelfFromMap(yyl2, d)
}
} else if yyct2 == codecSelferValueTypeArray4278 {
} else if yyct2 == codecSelferValueTypeArray9522 {
yyl2 := r.ReadArrayStart()
if yyl2 == 0 {
r.ReadArrayEnd()
@ -482,13 +482,13 @@ func (x *Dispatch) CodecDecodeSelf(d *codec1978.Decoder) {
x.codecDecodeSelfFromArray(yyl2, d)
}
} else {
panic(errCodecSelferOnlyMapOrArrayEncodeToStruct4278)
panic(errCodecSelferOnlyMapOrArrayEncodeToStruct9522)
}
}
}
func (x *Dispatch) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyhl3 bool = l >= 0
@ -535,7 +535,7 @@ func (x *Dispatch) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
}
func (x *Dispatch) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer4278
var h codecSelfer9522
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj8 int

View file

@ -364,7 +364,6 @@ func (e *executor) fetchThing(ctx context.Context, val *sql.Thing, doc *data.Doc
Expr: []*sql.Field{{Expr: &sql.All{}}},
What: []sql.Expr{val},
Version: sql.Expr(ver),
Parallel: 1,
})
if err != nil {
@ -400,7 +399,6 @@ func (e *executor) fetchArray(ctx context.Context, val []interface{}, doc *data.
Expr: []*sql.Field{{Expr: &sql.All{}}},
What: []sql.Expr{val},
Version: sql.Expr(ver),
Parallel: 1,
})
if err != nil {
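With the field gone, the executor's internal record fetches build their embedded SELECT from just the fields shown above; the same pattern repeats for the internal UPDATE and DELETE statements in the document code further down. A hedged sketch of the resulting call site follows: the sql.SelectStatement fields are taken from this hunk, while the surrounding variables and the executeSelect helper name are assumptions for illustration only.

	// Sketch, not repository code: executeSelect is a placeholder name.
	stm := &sql.SelectStatement{
		Expr:    []*sql.Field{{Expr: &sql.All{}}},
		What:    []sql.Expr{val},
		Version: sql.Expr(ver),
		// Parallel: 1, // removed: the struct no longer carries this field
	}
	res, err := e.executeSelect(ctx, stm)
	if err != nil {
		return nil, err
	}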

View file

@ -60,7 +60,6 @@ type iterator struct {
limit int
start int
versn int64
tasks int
}
type workable struct {
@ -100,8 +99,8 @@ func newIterator(e *executor, ctx context.Context, stm sql.Statement, vir bool)
i.wait = sync.WaitGroup{}
i.fail = make(chan error, 1)
i.stop = make(chan struct{})
i.jobs = make(chan *workable, 1000)
i.vals = make(chan *doneable, 1000)
i.jobs = make(chan *workable, workerCount)
i.vals = make(chan *doneable, workerCount)
// Comment here
@ -111,10 +110,6 @@ func newIterator(e *executor, ctx context.Context, stm sql.Statement, vir bool)
i.setupWorkers(ctx)
// Comment here ...
i.watchVals(ctx)
return
}
@ -140,7 +135,6 @@ func (i *iterator) Close() {
i.limit = -1
i.start = -1
i.versn = 0
i.tasks = 0
iteratorPool.Put(i)
@ -167,26 +161,18 @@ func (i *iterator) setupState(ctx context.Context) {
i.split = stm.Split
i.group = stm.Group
i.order = stm.Order
i.tasks = stm.Parallel
case *sql.CreateStatement:
i.what = stm.What
i.tasks = stm.Parallel
case *sql.UpdateStatement:
i.what = stm.What
i.cond = stm.Cond
i.tasks = stm.Parallel
case *sql.DeleteStatement:
i.what = stm.What
i.cond = stm.Cond
i.tasks = stm.Parallel
case *sql.RelateStatement:
i.tasks = stm.Parallel
case *sql.InsertStatement:
i.what = sql.Exprs{stm.Data}
i.tasks = stm.Parallel
case *sql.UpsertStatement:
i.what = sql.Exprs{stm.Data}
i.tasks = stm.Parallel
}
if stm, ok := i.stm.(*sql.SelectStatement); ok {
@ -237,27 +223,28 @@ func (i *iterator) checkState(ctx context.Context) bool {
func (i *iterator) setupWorkers(ctx context.Context) {
if i.checkState(ctx) {
for w := 1; w <= ints.Between(1, workerCount, i.tasks); w++ {
go i.setupWorker(ctx, i.jobs, i.vals)
}
if !i.checkState(ctx) {
return
}
go func(vals <-chan *doneable) {
for v := range vals {
i.receive(v)
}
}(i.vals)
func (i *iterator) setupWorker(ctx context.Context, jobs chan *workable, vals chan *doneable) {
for w := 1; w <= workerCount; w++ {
go func(jobs <-chan *workable, vals chan<- *doneable) {
for j := range jobs {
res, err := newDocument(i, j.key, j.val, j.doc).query(ctx, i.stm)
vals <- &doneable{res: res, err: err}
}
}(i.jobs, i.vals)
}
}
func (i *iterator) submitTask(key *keys.Thing, val kvs.KV, doc *data.Doc) {
func (i *iterator) deliver(key *keys.Thing, val kvs.KV, doc *data.Doc) {
i.wait.Add(1)
@ -265,16 +252,6 @@ func (i *iterator) submitTask(key *keys.Thing, val kvs.KV, doc *data.Doc) {
}
func (i *iterator) watchVals(ctx context.Context) {
go func(vals <-chan *doneable) {
for val := range vals {
i.receive(val)
}
}(i.vals)
}
func (i *iterator) receive(val *doneable) {
defer i.wait.Done()
@ -529,7 +506,7 @@ func (i *iterator) processThing(ctx context.Context, key *keys.Thing) {
i.processPerms(ctx, key.NS, key.DB, key.TB)
if i.checkState(ctx) {
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
}
}
@ -578,7 +555,7 @@ func (i *iterator) processTable(ctx context.Context, key *keys.Table) {
for _, val := range vals {
if i.checkState(ctx) {
i.submitTask(nil, val, nil)
i.deliver(nil, val, nil)
continue
}
}
@ -608,7 +585,7 @@ func (i *iterator) processBatch(ctx context.Context, key *keys.Thing, qry *sql.B
if i.checkState(ctx) {
key := key.Copy()
key.TB, key.ID = val.TB, val.ID
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -635,7 +612,7 @@ func (i *iterator) processModel(ctx context.Context, key *keys.Thing, qry *sql.M
if i.checkState(ctx) {
key := key.Copy()
key.ID = guid.New().String()
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -656,7 +633,7 @@ func (i *iterator) processModel(ctx context.Context, key *keys.Thing, qry *sql.M
if i.checkState(ctx) {
key := key.Copy()
key.ID = num
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -677,7 +654,7 @@ func (i *iterator) processModel(ctx context.Context, key *keys.Thing, qry *sql.M
if i.checkState(ctx) {
key := key.Copy()
key.ID = num
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -707,7 +684,7 @@ func (i *iterator) processOther(ctx context.Context, key *keys.Thing, val []inte
if i.checkState(ctx) {
key := key.Copy()
key.TB, key.ID = v.TB, v.ID
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -752,7 +729,7 @@ func (i *iterator) processQuery(ctx context.Context, key *keys.Thing, val []inte
if i.checkState(ctx) {
key := key.Copy()
key.TB, key.ID = v.TB, v.ID
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -762,7 +739,7 @@ func (i *iterator) processQuery(ctx context.Context, key *keys.Thing, val []inte
// of the data so we can process it.
if i.checkState(ctx) {
i.submitTask(nil, nil, data.Consume(v))
i.deliver(nil, nil, data.Consume(v))
continue
}
@ -790,7 +767,7 @@ func (i *iterator) processArray(ctx context.Context, key *keys.Thing, val []inte
if i.checkState(ctx) {
key := key.Copy()
key.ID = v.ID
i.submitTask(key, nil, nil)
i.deliver(key, nil, nil)
continue
}
@ -809,7 +786,7 @@ func (i *iterator) processArray(ctx context.Context, key *keys.Thing, val []inte
if i.checkState(ctx) {
key := key.Copy()
key.ID = thg.ID
i.submitTask(key, nil, data.Consume(v))
i.deliver(key, nil, data.Consume(v))
continue
}
@ -821,7 +798,7 @@ func (i *iterator) processArray(ctx context.Context, key *keys.Thing, val []inte
if i.checkState(ctx) {
key := key.Copy()
key.ID = fld
i.submitTask(key, nil, data.Consume(v))
i.deliver(key, nil, data.Consume(v))
continue
}
@ -835,7 +812,7 @@ func (i *iterator) processArray(ctx context.Context, key *keys.Thing, val []inte
if i.checkState(ctx) {
key := key.Copy()
key.ID = guid.New().String()
i.submitTask(key, nil, data.Consume(v))
i.deliver(key, nil, data.Consume(v))
continue
}
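Because the removed and added lines are interleaved in the setupWorkers hunk above, here is the new shape of that function reassembled from its added lines: the per-query sizing via ints.Between(1, workerCount, i.tasks) is gone, and the old setupWorker and watchVals helpers are folded in, so a single collector goroutine feeds i.receive while a fixed pool of workerCount anonymous workers drains i.jobs. The enqueue line in deliver is not visible in the hunk, so it is an assumption here.

func (i *iterator) setupWorkers(ctx context.Context) {

	if !i.checkState(ctx) {
		return
	}

	// One collector goroutine receives every completed result.
	go func(vals <-chan *doneable) {
		for v := range vals {
			i.receive(v)
		}
	}(i.vals)

	// A fixed pool of workers processes queued items.
	for w := 1; w <= workerCount; w++ {
		go func(jobs <-chan *workable, vals chan<- *doneable) {
			for j := range jobs {
				res, err := newDocument(i, j.key, j.val, j.doc).query(ctx, i.stm)
				vals <- &doneable{res: res, err: err}
			}
		}(i.jobs, i.vals)
	}

}

func (i *iterator) deliver(key *keys.Thing, val kvs.KV, doc *data.Doc) {
	i.wait.Add(1)
	i.jobs <- &workable{key: key, val: val, doc: doc} // assumed: not shown in the hunk
}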

View file

@ -161,7 +161,6 @@ func (d *document) tableDelete(ctx context.Context, tng *sql.Thing, exp sql.Fiel
NS: d.key.NS,
DB: d.key.DB,
What: sql.Exprs{tng},
Parallel: 1,
}
key := &keys.Thing{KV: stm.KV, NS: stm.NS, DB: stm.DB, TB: tng.TB, ID: tng.ID}
@ -189,7 +188,6 @@ func (d *document) tableUpdate(ctx context.Context, tng *sql.Thing, exp sql.Fiel
DB: d.key.DB,
What: sql.Exprs{tng},
Data: &sql.ContentExpression{Data: res},
Parallel: 1,
}
key := &keys.Thing{KV: stm.KV, NS: stm.NS, DB: stm.DB, TB: tng.TB, ID: tng.ID}
@ -302,7 +300,6 @@ func (d *document) tableModify(ctx context.Context, tng *sql.Thing, exp sql.Fiel
DB: d.key.DB,
What: sql.Exprs{tng},
Data: set,
Parallel: 1,
}
key := &keys.Thing{KV: stm.KV, NS: stm.NS, DB: stm.DB, TB: tng.TB, ID: tng.ID}

View file

@ -76,7 +76,7 @@ const (
var (
// workerCount specifies how many workers should be used
// to process each query statement concurrently.
workerCount = runtime.NumCPU() * 2
workerCount = runtime.NumCPU()
// maxRecursiveQueries specifies how many queries will be
// processed recursively before the query is cancelled.

File diff suppressed because it is too large

View file

@ -178,7 +178,6 @@ type SelectStatement struct {
Fetch Fetchs
Version Expr
Timeout time.Duration
Parallel int
}
// CreateStatement represents a SQL CREATE statement.
@ -190,7 +189,6 @@ type CreateStatement struct {
Data Expr
Echo Token
Timeout time.Duration
Parallel int
}
// UpdateStatement represents a SQL UPDATE statement.
@ -203,7 +201,6 @@ type UpdateStatement struct {
Cond Expr
Echo Token
Timeout time.Duration
Parallel int
}
// DeleteStatement represents a SQL DELETE statement.
@ -215,7 +212,6 @@ type DeleteStatement struct {
Cond Expr
Echo Token
Timeout time.Duration
Parallel int
}
// RelateStatement represents a SQL RELATE statement.
@ -230,7 +226,6 @@ type RelateStatement struct {
Uniq bool
Echo Token
Timeout time.Duration
Parallel int
}
// InsertStatement represents a SQL INSERT statement.
@ -242,7 +237,6 @@ type InsertStatement struct {
Into *Table
Echo Token
Timeout time.Duration
Parallel int
}
// UpsertStatement represents a SQL UPSERT statement.
@ -254,7 +248,6 @@ type UpsertStatement struct {
Into *Table
Echo Token
Timeout time.Duration
Parallel int
}
// --------------------------------------------------
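With Parallel int removed from all seven statement structs, Timeout is now the last option field on each of them, and any remaining code that sets the field no longer compiles. A hypothetical call site for illustration only (the tb expression is invented; What and Exprs appear elsewhere in this diff):

	stm := &sql.CreateStatement{
		What:     sql.Exprs{tb},
		Parallel: 1, // compile error after this commit: unknown field 'Parallel'
	}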

View file

@ -38,10 +38,6 @@ func (p *parser) parseCreateStatement() (stmt *CreateStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
return
}

View file

@ -40,10 +40,6 @@ func (p *parser) parseDeleteStatement() (stmt *DeleteStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
return
}

View file

@ -44,10 +44,6 @@ func (p *parser) parseInsertStatement() (stmt *InsertStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
return
}

View file

@ -56,10 +56,6 @@ func (p *parser) parseRelateStatement() (stmt *RelateStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
return
}

View file

@ -71,10 +71,6 @@ func (p *parser) parseSelectStatement() (stmt *SelectStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
if err = checkExpression(aggrs, stmt.Expr, stmt.Group); err != nil {
return nil, err
}

View file

@ -42,10 +42,6 @@ func (p *parser) parseUpdateStatement() (stmt *UpdateStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
return
}

View file

@ -44,10 +44,6 @@ func (p *parser) parseUpsertStatement() (stmt *UpsertStatement, err error) {
return nil, err
}
if stmt.Parallel, err = p.parseParallel(); err != nil {
return nil, err
}
return
}
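Each of the seven statement parsers loses the same four-line block, so the grammar no longer accepts the trailing clause that parseParallel consumed (presumably a PARALLEL keyword followed by a worker count, given the int field it populated). Purely as a hypothetical reconstruction for orientation, not code from this diff: the mightBe/shouldBe helpers and the PARALLEL/NUMBER token names below are assumptions.

	// Hypothetical sketch of the removed helper, for illustration only.
	func (p *parser) parseParallel() (int, error) {
		if _, _, exi := p.mightBe(PARALLEL); !exi {
			return 0, nil // clause absent: leave the statement's Parallel at zero
		}
		_, lit, err := p.shouldBe(NUMBER)
		if err != nil {
			return 0, err
		}
		return strconv.Atoi(lit)
	}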