Commit 2a672f6b authored by hequn

ftr: add simple task pool

parent b731b1f5
@@ -19,3 +19,5 @@ classes
# vim
*.swp
vendor/
package gxsync
import (
"fmt"
)
const (
defaultTaskQNumber = 10
defaultTaskQLen = 128
)
/////////////////////////////////////////
// Task Pool Options
/////////////////////////////////////////
type TaskPoolOptions struct {
tQLen int // task queue length. buffer size per queue
tQNumber int // task queue number. number of queue
tQPoolSize int // task pool size. number of workers
}
func (o *TaskPoolOptions) validate() {
if o.tQPoolSize < 1 {
panic(fmt.Sprintf("illegal pool size %d", o.tQPoolSize))
}
if o.tQLen < 1 {
o.tQLen = defaultTaskQLen
}
if o.tQNumber < 1 {
o.tQNumber = defaultTaskQNumber
}
if o.tQNumber > o.tQPoolSize {
o.tQNumber = o.tQPoolSize
}
}
type TaskPoolOption func(*TaskPoolOptions)
// @size is the task pool size, i.e. the number of worker goroutines
func WithTaskPoolTaskPoolSize(size int) TaskPoolOption {
return func(o *TaskPoolOptions) {
o.tQPoolSize = size
}
}
// @length is the task queue length
func WithTaskPoolTaskQueueLength(length int) TaskPoolOption {
return func(o *TaskPoolOptions) {
o.tQLen = length
}
}
// @number is the task queue number
func WithTaskPoolTaskQueueNumber(number int) TaskPoolOption {
return func(o *TaskPoolOptions) {
o.tQNumber = number
}
}
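For reference, a minimal sketch (not part of the diff; the sketchBuildOptions name is hypothetical) of how the functional options above compose and how validate() fills in defaults:

// sketchBuildOptions is illustrative only: it applies a couple of options
// and lets validate() supply the remaining defaults.
func sketchBuildOptions() TaskPoolOptions {
	opts := TaskPoolOptions{}
	for _, opt := range []TaskPoolOption{
		WithTaskPoolTaskPoolSize(8),    // 8 workers
		WithTaskPoolTaskQueueNumber(4), // 4 queues shared by the workers
	} {
		opt(&opts)
	}
	opts.validate() // tQLen was left at 0, so it falls back to defaultTaskQLen (128)
	return opts
}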
@@ -19,9 +19,13 @@ package gxsync
import (
"fmt"
"log"
"math/rand"
"os"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
"time"
)
@@ -29,33 +33,210 @@ import (
gxruntime "github.com/dubbogo/gost/runtime"
)
// task is a unit of work submitted to a pool
type task func()
type GenericTaskPool interface {
// AddTask enqueues a task, waiting for a free queue slot or idle worker; reports whether the task was accepted
AddTask(t task) bool
// AddTaskAlways enqueues a task, or runs it in a fresh goroutine if all queues are full
AddTaskAlways(t task)
// AddTaskBalance enqueues a task to a randomly probed idle queue, or runs it immediately if none is found
AddTaskBalance(t task)
Close()
}
func goSafely(fn func()) {
gxruntime.GoSafely(nil, false, fn, nil)
}
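Both pool implementations in this diff satisfy GenericTaskPool, so call sites can hold the interface and swap implementations. A minimal sketch (the sketchUseGenericPool name is hypothetical):

// sketchUseGenericPool is illustrative only: either constructor can back the
// same interface value.
func sketchUseGenericPool() {
	var pool GenericTaskPool = NewTaskPoolSimple(0) // size < 1 falls back to runtime.NumCPU()*100 workers
	pool.AddTaskAlways(func() {
		// do some work here
	})
	pool.Close() // a no-op for the simple pool, but part of the interface
}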
/////////////////////////////////////////
// Task Pool
/////////////////////////////////////////
// TaskPool manages a set of task queues and the workers that drain them
type TaskPool struct {
TaskPoolOptions
idx uint32 // round robin index
qArray []chan task
wg sync.WaitGroup
once sync.Once
done chan struct{}
}
// NewTaskPool builds a task pool from the supplied options
func NewTaskPool(opts ...TaskPoolOption) GenericTaskPool {
var tOpts TaskPoolOptions
for _, opt := range opts {
opt(&tOpts)
}
tOpts.validate()
p := &TaskPool{
TaskPoolOptions: tOpts,
qArray: make([]chan task, tOpts.tQNumber),
done: make(chan struct{}),
}
for i := 0; i < p.tQNumber; i++ {
p.qArray[i] = make(chan task, p.tQLen)
}
p.start()
return p
}
// start task pool
func (p *TaskPool) start() {
for i := 0; i < p.tQPoolSize; i++ {
p.wg.Add(1)
workerID := i
q := p.qArray[workerID%p.tQNumber]
p.safeRun(workerID, q)
}
}
func (p *TaskPool) safeRun(workerID int, q chan task) {
gxruntime.GoSafely(nil, false,
func() {
err := p.run(workerID, q)
if err != nil {
// log error to stderr
log.Printf("gost/TaskPool.run error: %s", err.Error())
}
},
nil,
)
}
// run is the worker loop: it drains tasks from q until the pool is closed
func (p *TaskPool) run(id int, q chan task) error {
defer p.wg.Done()
var (
ok bool
t task
)
for {
select {
case <-p.done:
if 0 < len(q) {
return fmt.Errorf("task worker %d exit now while its task buffer length %d is greater than 0",
id, len(q))
}
return nil
case t, ok = <-q:
if ok {
func() {
defer func() {
if r := recover(); r != nil {
fmt.Fprintf(os.Stderr, "%s goroutine panic: %v\n%s\n",
time.Now(), r, string(debug.Stack()))
}
}()
t()
}()
}
}
}
}
// AddTask returns false when the pool has been stopped; otherwise it enqueues the task round robin
func (p *TaskPool) AddTask(t task) (ok bool) {
idx := atomic.AddUint32(&p.idx, 1)
id := idx % uint32(p.tQNumber)
select {
case <-p.done:
return false
default:
p.qArray[id] <- t
return true
}
}
func (p *TaskPool) AddTaskAlways(t task) {
id := atomic.AddUint32(&p.idx, 1) % uint32(p.tQNumber)
select {
case p.qArray[id] <- t:
return
default:
goSafely(t)
}
}
// AddTaskBalance probes random queues for a free slot and runs the task immediately when none is found
func (p *TaskPool) AddTaskBalance(t task) {
length := len(p.qArray)
// probe len/2 random queues looking for a free slot
for i := 0; i < length/2; i++ {
select {
case p.qArray[rand.Intn(length)] <- t:
return
default:
continue
}
}
goSafely(t)
}
// stop signals all workers to exit by closing the done channel
func (p *TaskPool) stop() {
select {
case <-p.done:
return
default:
p.once.Do(func() {
close(p.done)
})
}
}
// IsClosed checks whether the pool has been closed.
func (p *TaskPool) IsClosed() bool {
select {
case <-p.done:
return true
default:
return false
}
}
func (p *TaskPool) Close() {
p.stop()
p.wg.Wait()
for i := range p.qArray {
close(p.qArray[i])
}
}
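A minimal lifecycle sketch for the queue-based pool, using only APIs from this diff (the sketchTaskPoolLifecycle name and the parameter values are illustrative):

// sketchTaskPoolLifecycle is illustrative only: construct, submit, and shut
// down a TaskPool.
func sketchTaskPoolLifecycle() {
	p := NewTaskPool(
		WithTaskPoolTaskPoolSize(4),    // 4 workers
		WithTaskPoolTaskQueueNumber(2), // 2 queues, 2 workers per queue
		WithTaskPoolTaskQueueLength(16),
	)
	done := make(chan struct{})
	p.AddTask(func() { close(done) })
	<-done    // the task has run on one of the workers
	p.Close() // signals done, waits for the workers, then closes the queues
}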
/////////////////////////////////////////
// Task Pool Simple
/////////////////////////////////////////
type taskPoolSimple struct {
work chan task
sem chan struct{}
}
func NewTaskPoolSimple(size int) GenericTaskPool {
if size < 1 {
size = runtime.NumCPU() * 100
}
return &taskPoolSimple{
work: make(chan task),
sem: make(chan struct{}, size),
}
}
func (p *taskPoolSimple) AddTask(t task) bool {
select {
case p.work <- t:
case p.sem <- struct{}{}:
@@ -64,7 +245,7 @@ func (p *taskPool) AddTask(t task) (ok bool) {
return true
}
func (p *taskPoolSimple) AddTaskAlways(t task) {
select {
case p.work <- t:
return
@@ -75,12 +256,11 @@ func (p *taskPool) AddTaskAlways(t task) (ok bool) {
case p.sem <- struct{}{}:
go p.worker(t)
default:
goSafely(t)
}
}
func (p *taskPoolSimple) worker(t task) {
defer func() {
if r := recover(); r != nil {
fmt.Fprintf(os.Stderr, "%s goroutine panic: %v\n%s\n",
@@ -94,6 +274,6 @@ func (p *taskPool) worker(t task) {
}
}
func (p *taskPoolSimple) Close() {}
func (p *taskPoolSimple) AddTaskBalance(t task) { p.AddTaskAlways(t) }
@@ -33,11 +33,11 @@ func newCountTask() (func(), *int64) {
}, &cnt
}
func TestTaskPoolSimple(t *testing.T) {
numCPU := runtime.NumCPU()
taskCnt := int64(numCPU * numCPU * 100)
tp := NewTaskPoolSimple(1)
task, cnt := newCountTask()
@@ -61,8 +61,8 @@ func TestTaskPool(t *testing.T) {
}
}
func BenchmarkTaskPoolSimple_CountTask(b *testing.B) {
tp := NewTaskPoolSimple(runtime.NumCPU())
b.Run(`AddTask`, func(b *testing.B) {
task, _ := newCountTask()
@@ -91,8 +91,183 @@ func fib(n int) int {
}
// cpu-intensive task
func BenchmarkTaskPoolSimple_CPUTask(b *testing.B) {
tp := NewTaskPoolSimple(runtime.NumCPU())
newCPUTask := func() (func(), *int64) {
var cnt int64
return func() {
atomic.AddInt64(&cnt, int64(fib(22)))
}, &cnt
}
b.Run(`fib`, func(b *testing.B) {
t, _ := newCPUTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
t()
}
})
})
b.Run(`AddTask`, func(b *testing.B) {
task, _ := newCPUTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTask(task)
}
})
})
b.Run(`AddTaskAlways`, func(b *testing.B) {
task, _ := newCPUTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTaskAlways(task)
}
})
})
}
// IO-intensive task
func BenchmarkTaskPoolSimple_IOTask(b *testing.B) {
tp := NewTaskPoolSimple(runtime.NumCPU())
newIOTask := func() (func(), *int64) {
var cnt int64
return func() {
time.Sleep(700 * time.Microsecond)
}, &cnt
}
b.Run(`AddTask`, func(b *testing.B) {
task, _ := newIOTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTask(task)
}
})
})
b.Run(`AddTaskAlways`, func(b *testing.B) {
task, _ := newIOTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTaskAlways(task)
}
})
})
}
func BenchmarkTaskPoolSimple_RandomTask(b *testing.B) {
tp := NewTaskPoolSimple(runtime.NumCPU())
newRandomTask := func() (func(), *int64) {
c := rand.Intn(4)
tasks := []func(){
func() { _ = fib(rand.Intn(20)) },
func() { t, _ := newCountTask(); t() },
func() { runtime.Gosched() },
func() { time.Sleep(time.Duration(rand.Int63n(100)) * time.Microsecond) },
}
return tasks[c], nil
}
b.Run(`AddTask`, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
task, _ := newRandomTask()
tp.AddTask(task)
}
})
})
b.Run(`AddTaskAlways`, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
task, _ := newRandomTask()
tp.AddTaskAlways(task)
}
})
})
}
func TestTaskPool(t *testing.T) {
numCPU := runtime.NumCPU()
taskCnt := int64(numCPU * numCPU * 100)
tp := NewTaskPool(
WithTaskPoolTaskPoolSize(1),
WithTaskPoolTaskQueueNumber(1),
WithTaskPoolTaskQueueLength(1),
)
task, cnt := newCountTask()
var wg sync.WaitGroup
for i := 0; i < numCPU*numCPU; i++ {
wg.Add(1)
go func() {
for j := 0; j < 100; j++ {
ok := tp.AddTask(task)
if !ok {
t.Log(j)
}
}
wg.Done()
}()
}
wg.Wait()
tp.Close()
if taskCnt != *cnt {
t.Error("want ", taskCnt, " got ", *cnt)
}
}
func BenchmarkTaskPool_CountTask(b *testing.B) {
tp := NewTaskPool(
WithTaskPoolTaskPoolSize(runtime.NumCPU()),
WithTaskPoolTaskQueueNumber(runtime.NumCPU()),
//WithTaskPoolTaskQueueLength(runtime.NumCPU()),
)
b.Run(`AddTask`, func(b *testing.B) {
task, _ := newCountTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTask(task)
}
})
})
b.Run(`AddTaskAlways`, func(b *testing.B) {
task, _ := newCountTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTaskAlways(task)
}
})
})
b.Run(`AddTaskBalance`, func(b *testing.B) {
task, _ := newCountTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTaskBalance(task)
}
})
})
}
// cpu-intensive task
func BenchmarkTaskPool_CPUTask(b *testing.B) {
tp := NewTaskPool(
WithTaskPoolTaskPoolSize(runtime.NumCPU()),
WithTaskPoolTaskQueueNumber(runtime.NumCPU()),
//WithTaskPoolTaskQueueLength(runtime.NumCPU()),
)
newCPUTask := func() (func(), *int64) {
var cnt int64
@@ -127,11 +302,25 @@ func BenchmarkTaskPool_CPUTask(b *testing.B) {
}
})
})
b.Run(`AddTaskBalance`, func(b *testing.B) {
task, _ := newCPUTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTaskBalance(task)
}
})
})
}
// IO-intensive task
func BenchmarkTaskPool_IOTask(b *testing.B) {
tp := NewTaskPool(
WithTaskPoolTaskPoolSize(runtime.NumCPU()),
WithTaskPoolTaskQueueNumber(runtime.NumCPU()),
//WithTaskPoolTaskQueueLength(runtime.NumCPU()),
)
newIOTask := func() (func(), *int64) {
var cnt int64
@@ -157,10 +346,23 @@ func BenchmarkTaskPool_IOTask(b *testing.B) {
}
})
})
b.Run(`AddTaskBalance`, func(b *testing.B) {
task, _ := newIOTask()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tp.AddTaskBalance(task)
}
})
})
}
func BenchmarkTaskPool_RandomTask(b *testing.B) {
tp := NewTaskPool(
WithTaskPoolTaskPoolSize(runtime.NumCPU()),
WithTaskPoolTaskQueueNumber(runtime.NumCPU()),
//WithTaskPoolTaskQueueLength(runtime.NumCPU()),
)
newRandomTask := func() (func(), *int64) {
c := rand.Intn(4)
@@ -190,20 +392,45 @@ func BenchmarkTaskPool_RandomTask(b *testing.B) {
}
})
})
b.Run(`AddTaskBalance`, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
task, _ := newRandomTask()
tp.AddTaskBalance(task)
}
})
})
}
/*
goos: darwin
goarch: amd64
pkg: github.com/dubbogo/gost/sync
BenchmarkTaskPoolSimple_CountTask/AddTask-8 1693192 700 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPoolSimple_CountTask/AddTaskAlways-8 3262932 315 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPoolSimple_CPUTask/fib-8 83479 14760 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPoolSimple_CPUTask/AddTask-8 85956 14571 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPoolSimple_CPUTask/AddTaskAlways-8 1000000 17712 ns/op 19 B/op 0 allocs/op
BenchmarkTaskPoolSimple_IOTask/AddTask-8 10000 107361 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPoolSimple_IOTask/AddTaskAlways-8 2772476 477 ns/op 79 B/op 1 allocs/op
BenchmarkTaskPoolSimple_RandomTask/AddTask-8 499417 2451 ns/op 6 B/op 0 allocs/op
BenchmarkTaskPoolSimple_RandomTask/AddTaskAlways-8 3307748 354 ns/op 21 B/op 0 allocs/op
BenchmarkTaskPool_CountTask/AddTask-8 5367189 229 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_CountTask/AddTaskAlways-8 5438667 218 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_CountTask/AddTaskBalance-8 4765616 247 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_CPUTask/fib-8 74749 17153 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_CPUTask/AddTask-8 71020 18131 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_CPUTask/AddTaskAlways-8 563931 17725 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_CPUTask/AddTaskBalance-8 204085 17720 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_IOTask/AddTask-8 12427 106108 ns/op 0 B/op 0 allocs/op
BenchmarkTaskPool_IOTask/AddTaskAlways-8 2607068 504 ns/op 81 B/op 1 allocs/op
BenchmarkTaskPool_IOTask/AddTaskBalance-8 2065213 580 ns/op 63 B/op 0 allocs/op
BenchmarkTaskPool_RandomTask/AddTask-8 590595 2274 ns/op 6 B/op 0 allocs/op
BenchmarkTaskPool_RandomTask/AddTaskAlways-8 3565921 333 ns/op 21 B/op 0 allocs/op
BenchmarkTaskPool_RandomTask/AddTaskBalance-8 1487217 839 ns/op 17 B/op 0 allocs/op
PASS
*/