Commit 9067c167 authored by xujianhai666

use new queue

parent efd601d7
@@ -19,108 +19,373 @@ package container
import (
"errors"
"runtime"
"sync"
)
// defaultQueueLen is the smallest capacity that the queue may have.
// Must be a power of 2 for the bitwise modulus: x % n == x & (n - 1).
const (
defaultQueueLen = 16
"sync/atomic"
"time"
)
var (
ErrEmpty = errors.New("queue is empty")
ErrFull = errors.New("queue is full")
// ErrDisposed is returned when an operation is performed on a disposed
// queue.
ErrDisposed = errors.New(`queue: disposed`)
// ErrTimeout is returned when an applicable queue operation times out.
ErrTimeout = errors.New(`queue: poll timed out`)
// ErrEmptyQueue is returned when a non-applicable queue operation was called
// because the queue holds no items
ErrEmptyQueue = errors.New(`queue: empty queue`)
)
// Queue represents a single instance of the queue data structure.
type Queue struct {
buf []interface{}
head, tail, count, cap int
lock sync.Mutex
type waiters []*sema
func (w *waiters) get() *sema {
if len(*w) == 0 {
return nil
}
sema := (*w)[0]
copy((*w)[0:], (*w)[1:])
(*w)[len(*w)-1] = nil // or the zero value of T
*w = (*w)[:len(*w)-1]
return sema
}
// NewQueue constructs and returns a new Queue.
func NewQueue(cap int) *Queue {
return &Queue{
cap: cap,
buf: make([]interface{}, defaultQueueLen),
func (w *waiters) put(sema *sema) {
*w = append(*w, sema)
}
func (w *waiters) remove(sema *sema) {
if len(*w) == 0 {
return
}
// build new slice, copy all except sema
ws := *w
newWs := make(waiters, 0, len(*w))
for i := range ws {
if ws[i] != sema {
newWs = append(newWs, ws[i])
}
}
*w = newWs
}
// Length returns the number of elements currently stored in the queue.
func (q *Queue) Length() int {
q.lock.Lock()
defer q.lock.Unlock()
return q.count
type items []interface{}
func (items *items) get(number int64) []interface{} {
returnItems := make([]interface{}, 0, number)
index := int64(0)
for i := int64(0); i < number; i++ {
if i >= int64(len(*items)) {
break
}
returnItems = append(returnItems, (*items)[i])
(*items)[i] = nil
index++
}
*items = (*items)[index:]
return returnItems
}
// resizes the queue to fit exactly twice its current contents
// this can result in shrinking if the queue is less than half-full
func (q *Queue) resize() {
newSize := q.count << 1
if newSize > q.cap {
newSize = q.cap
func (items *items) peek() (interface{}, bool) {
length := len(*items)
if length == 0 {
return nil, false
}
return (*items)[0], true
}
func (items *items) getUntil(checker func(item interface{}) bool) []interface{} {
length := len(*items)
if len(*items) == 0 {
// return an explicit empty slice rather than nil so callers always
// receive a usable, non-nil result
return []interface{}{}
}
newBuf := make([]interface{}, newSize)
if q.tail > q.head {
copy(newBuf, q.buf[q.head:q.tail])
} else {
n := copy(newBuf, q.buf[q.head:])
copy(newBuf[n:], q.buf[:q.tail])
returnItems := make([]interface{}, 0, length)
index := -1
for i, item := range *items {
if !checker(item) {
break
}
returnItems = append(returnItems, item)
index = i
(*items)[i] = nil // prevent memory leak
}
*items = (*items)[index+1:]
return returnItems
}
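// sema is the handshake used between Put and a blocked Get/Poll: Put signals
// availability on the buffered ready channel, then blocks on response until
// the woken waiter has drained the items it needs while Put still holds the
// queue lock, so the two sides never touch q.items concurrently.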
type sema struct {
ready chan bool
response *sync.WaitGroup
}
func newSema() *sema {
return &sema{
ready: make(chan bool, 1),
response: &sync.WaitGroup{},
}
}
q.head = 0
q.tail = q.count
q.buf = newBuf
// Queue is the struct responsible for tracking the state
// of the queue.
type Queue struct {
waiters waiters
items items
lock sync.Mutex
disposed bool
}
// Add puts an element on the end of the queue.
func (q *Queue) Add(elem interface{}) error {
// Put will add the specified items to the queue.
func (q *Queue) Put(items ...interface{}) error {
if len(items) == 0 {
return nil
}
q.lock.Lock()
defer q.lock.Unlock()
if q.count >= q.cap {
return ErrFull
if q.disposed {
q.lock.Unlock()
return ErrDisposed
}
if q.count == len(q.buf) {
q.resize()
q.items = append(q.items, items...)
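// Wake any blocked waiters one at a time. A live waiter receives on its
// buffered ready channel and drains what it needs while this Put holds the
// lock (response.Wait blocks until it is done). If the waiter already timed
// out it has filled its own ready slot, so the send falls through to the
// default case and the waiter is simply skipped.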
for {
sema := q.waiters.get()
if sema == nil {
break
}
sema.response.Add(1)
select {
case sema.ready <- true:
sema.response.Wait()
default:
// This semaphore timed out.
}
if len(q.items) == 0 {
break
}
}
q.buf[q.tail] = elem
q.tail = (q.tail + 1) & (len(q.buf) - 1)
q.count++
q.lock.Unlock()
return nil
}
// Peek returns the element at the head of the queue. It returns ErrEmpty
// if the queue is empty.
// Get retrieves items from the queue. If there are some items in the
// queue, get will return a number UP TO the number passed in as a
// parameter. If no items are in the queue, this method will pause
// until items are added to the queue.
func (q *Queue) Get(number int64) ([]interface{}, error) {
return q.Poll(number, 0)
}
// Poll retrieves items from the queue. If there are some items in the queue,
// Poll will return a number UP TO the number passed in as a parameter. If no
// items are in the queue, this method will pause until items are added to the
// queue or the provided timeout is reached. A non-positive timeout will block
// until items are added. If a timeout occurs, ErrTimeout is returned.
func (q *Queue) Poll(number int64, timeout time.Duration) ([]interface{}, error) {
if number < 1 {
// return an empty, non-nil slice for a non-positive request
return []interface{}{}, nil
}
q.lock.Lock()
if q.disposed {
q.lock.Unlock()
return nil, ErrDisposed
}
var items []interface{}
if len(q.items) == 0 {
sema := newSema()
q.waiters.put(sema)
q.lock.Unlock()
var timeoutC <-chan time.Time
if timeout > 0 {
timeoutC = time.After(timeout)
}
select {
case <-sema.ready:
// we are now inside the put's lock
if q.disposed {
return nil, ErrDisposed
}
items = q.items.get(number)
sema.response.Done()
return items, nil
case <-timeoutC:
// cleanup the sema that was added to waiters
select {
case sema.ready <- true:
// we called this before Put() could
// Remove sema from waiters.
q.lock.Lock()
q.waiters.remove(sema)
q.lock.Unlock()
default:
// Put() got it already, we need to call Done() so Put() can move on
sema.response.Done()
}
return nil, ErrTimeout
}
}
items = q.items.get(number)
q.lock.Unlock()
return items, nil
}
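// Illustrative usage sketch (not part of this change): Poll blocks until an
// item arrives or the timeout elapses, returning ErrTimeout in the latter case.
//
//	q := New(4)
//	go func() {
//		time.Sleep(10 * time.Millisecond)
//		q.Put("job")
//	}()
//	items, err := q.Poll(1, 50*time.Millisecond)
//	if err == ErrTimeout {
//		// no producer delivered an item within 50ms
//	} else {
//		handle(items[0]) // handle is a hypothetical consumer function
//	}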
// Peek returns the first item in the queue by value
// without modifying the queue.
func (q *Queue) Peek() (interface{}, error) {
q.lock.Lock()
defer q.lock.Unlock()
if q.count <= 0 {
return nil, ErrEmpty
if q.disposed {
return nil, ErrDisposed
}
peekItem, ok := q.items.peek()
if !ok {
return nil, ErrEmptyQueue
}
return peekItem, nil
}
// TakeUntil takes a function and returns a list of items that
// match the checker until the checker returns false. This does not
// wait if there are no items in the queue.
func (q *Queue) TakeUntil(checker func(item interface{}) bool) ([]interface{}, error) {
if checker == nil {
return nil, nil
}
q.lock.Lock()
if q.disposed {
q.lock.Unlock()
return nil, ErrDisposed
}
return q.buf[q.head], nil
result := q.items.getUntil(checker)
q.lock.Unlock()
return result, nil
}
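// Illustrative usage sketch (not part of this change): TakeUntil drains items
// from the front only while the checker keeps returning true and never blocks.
//
//	q := New(4)
//	q.Put(1, 2, 7, 3)
//	small, _ := q.TakeUntil(func(item interface{}) bool {
//		return item.(int) < 5
//	})
//	// small == []interface{}{1, 2}; 7 and 3 stay queued because the scan
//	// stops at the first item that fails the check.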
// Empty returns a bool indicating if this queue is empty.
func (q *Queue) Empty() bool {
q.lock.Lock()
defer q.lock.Unlock()
return len(q.items) == 0
}
// Remove removes an element from the front of the queue. If the
// queue is empty, it returns ErrEmpty.
func (q *Queue) Remove() (interface{}, error) {
// Len returns the number of items in this queue.
func (q *Queue) Len() int64 {
q.lock.Lock()
defer q.lock.Unlock()
if q.count <= 0 {
return nil, ErrEmpty
return int64(len(q.items))
}
// Disposed returns a bool indicating if this queue
// has had Dispose called on it.
func (q *Queue) Disposed() bool {
q.lock.Lock()
defer q.lock.Unlock()
return q.disposed
}
// Dispose will dispose of this queue and return
// the items it still held. Any subsequent calls to Get
// or Put will return ErrDisposed.
func (q *Queue) Dispose() []interface{} {
q.lock.Lock()
defer q.lock.Unlock()
q.disposed = true
for _, waiter := range q.waiters {
waiter.response.Add(1)
select {
case waiter.ready <- true:
// release Poll immediately
default:
// ignore if it's a timeout or in the get
}
}
ret := q.buf[q.head]
q.buf[q.head] = nil
q.head = (q.head + 1) & (len(q.buf) - 1)
q.count--
// Resize down if buffer 1/4 full.
if len(q.buf) > defaultQueueLen && (q.count<<2) == len(q.buf) {
q.resize()
disposedItems := q.items
q.items = nil
q.waiters = nil
return disposedItems
}
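// Illustrative sketch (not part of this change): Dispose releases goroutines
// blocked in Get/Poll, which then observe the disposed flag and return
// ErrDisposed instead of items.
//
//	q := New(0)
//	go func() {
//		_, err := q.Get(1) // blocks until Put or Dispose
//		fmt.Println(err)   // "queue: disposed" (assumes fmt is imported)
//	}()
//	q.Dispose()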
// New is a constructor for a new thread-safe queue.
func New(hint int64) *Queue {
return &Queue{
items: make([]interface{}, 0, hint),
}
}
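// Illustrative usage sketch (not part of this change): the hint only sizes the
// initial backing slice; the queue grows as needed beyond it.
//
//	q := New(2)
//	q.Put("a", "b", "c") // more items than the hint is fine
//	first, _ := q.Get(2) // first == []interface{}{"a", "b"}
//	rest, _ := q.Get(5)  // rest == []interface{}{"c"}; Get returns up to 5 items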
// ExecuteInParallel will (in parallel) call the provided function
// with each item in the queue until the queue is exhausted. When the queue
// is exhausted, execution is complete and all goroutines exit.
// The queue is then disposed, so it cannot be used again.
func ExecuteInParallel(q *Queue, fn func(interface{})) {
if q == nil {
return
}
q.lock.Lock() // so no one touches anything in the middle
// of this process
todo, done := uint64(len(q.items)), int64(-1)
// this is important or we might face an infinite loop
if todo == 0 {
// release the lock before returning so an empty queue is not left locked
q.lock.Unlock()
return
}
numCPU := 1
if runtime.NumCPU() > 1 {
numCPU = runtime.NumCPU() - 1
}
var wg sync.WaitGroup
wg.Add(numCPU)
items := q.items
for i := 0; i < numCPU; i++ {
go func() {
for {
index := atomic.AddInt64(&done, 1)
if index >= int64(todo) {
wg.Done()
break
}
fn(items[index])
items[index] = nil // release the reference so it can be garbage collected
}
}()
}
return ret, nil
wg.Wait()
q.lock.Unlock()
q.Dispose()
}
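// Illustrative usage sketch (not part of this change): ExecuteInParallel fans
// queued items out across roughly NumCPU-1 goroutines and disposes the queue
// when it finishes, so the queue cannot be reused afterwards.
//
//	q := New(100)
//	for i := 0; i < 100; i++ {
//		q.Put(i)
//	}
//	var sum int64
//	ExecuteInParallel(q, func(item interface{}) {
//		atomic.AddInt64(&sum, int64(item.(int))) // atomic guards the shared counter
//	})
//	// sum == 4950 and q.Disposed() == true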
@@ -18,119 +18,597 @@ limitations under the License.
package container
import (
"sync"
"sync/atomic"
"testing"
"time"
)
import (
"github.com/stretchr/testify/assert"
)
func TestQueueSimple(t *testing.T) {
q := NewQueue(defaultQueueLen)
func TestPut(t *testing.T) {
q := New(10)
for i := 0; i < defaultQueueLen; i++ {
q.Add(i)
q.Put(`test`)
assert.Equal(t, int64(1), q.Len())
results, err := q.Get(1)
assert.Nil(t, err)
result := results[0]
assert.Equal(t, `test`, result)
assert.True(t, q.Empty())
q.Put(`test2`)
assert.Equal(t, int64(1), q.Len())
results, err = q.Get(1)
assert.Nil(t, err)
result = results[0]
assert.Equal(t, `test2`, result)
assert.True(t, q.Empty())
}
func TestGet(t *testing.T) {
q := New(10)
q.Put(`test`)
result, err := q.Get(2)
if !assert.Nil(t, err) {
return
}
for i := 0; i < defaultQueueLen; i++ {
v, _ := q.Peek()
assert.Equal(t, i, v.(int))
x, _ := q.Remove()
assert.Equal(t, i, x)
assert.Len(t, result, 1)
assert.Equal(t, `test`, result[0])
assert.Equal(t, int64(0), q.Len())
q.Put(`1`)
q.Put(`2`)
result, err = q.Get(1)
if !assert.Nil(t, err) {
return
}
assert.Len(t, result, 1)
assert.Equal(t, `1`, result[0])
assert.Equal(t, int64(1), q.Len())
result, err = q.Get(2)
if !assert.Nil(t, err) {
return
}
assert.Equal(t, `2`, result[0])
}
func TestQueueWrapping(t *testing.T) {
q := NewQueue(defaultQueueLen)
func TestPoll(t *testing.T) {
q := New(10)
// should be able to Poll() before anything is present, without breaking future Puts
q.Poll(1, time.Millisecond)
q.Put(`test`)
result, err := q.Poll(2, 0)
if !assert.Nil(t, err) {
return
}
assert.Len(t, result, 1)
assert.Equal(t, `test`, result[0])
assert.Equal(t, int64(0), q.Len())
q.Put(`1`)
q.Put(`2`)
for i := 0; i < defaultQueueLen; i++ {
q.Add(i)
result, err = q.Poll(1, time.Millisecond)
if !assert.Nil(t, err) {
return
}
for i := 0; i < 3; i++ {
q.Remove()
q.Add(defaultQueueLen + i)
assert.Len(t, result, 1)
assert.Equal(t, `1`, result[0])
assert.Equal(t, int64(1), q.Len())
result, err = q.Poll(2, time.Millisecond)
if !assert.Nil(t, err) {
return
}
for i := 0; i < defaultQueueLen; i++ {
v, _ := q.Peek()
assert.Equal(t, i+3, v.(int))
q.Remove()
assert.Equal(t, `2`, result[0])
before := time.Now()
_, err = q.Poll(1, 5*time.Millisecond)
// This delta is normally 1-3 ms but running tests in CI with -race causes
// this to run much slower. For now, just bump up the threshold.
assert.InDelta(t, 5, time.Since(before).Seconds()*1000, 10)
assert.Equal(t, ErrTimeout, err)
}
func TestPollNoMemoryLeak(t *testing.T) {
q := New(0)
assert.Len(t, q.waiters, 0)
for i := 0; i < 10; i++ {
// Poll() should cleanup waiters after timeout
q.Poll(1, time.Nanosecond)
assert.Len(t, q.waiters, 0)
}
}
func TestQueueFull(t *testing.T) {
q := NewQueue(defaultQueueLen)
for i := 0; i < defaultQueueLen; i++ {
err := q.Add(i)
assert.Nil(t, err)
func TestAddEmptyPut(t *testing.T) {
q := New(10)
q.Put()
if q.Len() != 0 {
t.Errorf(`Expected len: %d, received: %d`, 0, q.Len())
}
err := q.Add(defaultQueueLen)
assert.Equal(t, ErrFull, err)
}
func TestQueueLength(t *testing.T) {
q := NewQueue(1000)
func TestGetNonPositiveNumber(t *testing.T) {
q := New(10)
q.Put(`test`)
result, err := q.Get(0)
if !assert.Nil(t, err) {
return
}
if len(result) != 0 {
t.Errorf(`Expected len: %d, received: %d`, 0, len(result))
}
}
assert.Equal(t, 0, q.Length(), "empty queue length should be 0")
func TestEmpty(t *testing.T) {
q := New(10)
for i := 0; i < 1000; i++ {
q.Add(i)
assert.Equal(t, i+1, q.Length())
if !q.Empty() {
t.Errorf(`Expected empty queue.`)
}
for i := 0; i < 1000; i++ {
q.Remove()
assert.Equal(t, 1000-i-1, q.Length())
q.Put(`test`)
if q.Empty() {
t.Errorf(`Expected non-empty queue.`)
}
}
func TestQueuePeekOutOfRangeErr(t *testing.T) {
q := NewQueue(defaultQueueLen)
func TestGetEmpty(t *testing.T) {
q := New(10)
_, err := q.Peek()
assert.Equal(t, ErrEmpty, err)
go func() {
q.Put(`a`)
}()
q.Add(1)
_, err = q.Remove()
assert.Nil(t, err)
result, err := q.Get(2)
if !assert.Nil(t, err) {
return
}
assert.Len(t, result, 1)
assert.Equal(t, `a`, result[0])
}
func TestMultipleGetEmpty(t *testing.T) {
q := New(10)
var wg sync.WaitGroup
wg.Add(2)
results := make([][]interface{}, 2)
go func() {
wg.Done()
local, err := q.Get(1)
assert.Nil(t, err)
results[0] = local
wg.Done()
}()
go func() {
wg.Done()
local, err := q.Get(1)
assert.Nil(t, err)
results[1] = local
wg.Done()
}()
wg.Wait()
wg.Add(2)
q.Put(`a`, `b`, `c`)
wg.Wait()
if assert.Len(t, results[0], 1) && assert.Len(t, results[1], 1) {
assert.True(t, (results[0][0] == `a` && results[1][0] == `b`) ||
(results[0][0] == `b` && results[1][0] == `a`),
`The array should be a, b or b, a`)
}
}
func TestDispose(t *testing.T) {
// when the queue is empty
q := New(10)
itemsDisposed := q.Dispose()
assert.Empty(t, itemsDisposed)
// when the queue is not empty
q = New(10)
q.Put(`1`)
itemsDisposed = q.Dispose()
expected := []interface{}{`1`}
assert.Equal(t, expected, itemsDisposed)
// when the queue has been disposed
itemsDisposed = q.Dispose()
assert.Nil(t, itemsDisposed)
}
func TestEmptyGetWithDispose(t *testing.T) {
q := New(10)
var wg sync.WaitGroup
wg.Add(1)
var err error
go func() {
wg.Done()
_, err = q.Get(1)
wg.Done()
}()
wg.Wait()
wg.Add(1)
q.Dispose()
wg.Wait()
assert.IsType(t, ErrDisposed, err)
}
func TestDisposeAfterEmptyPoll(t *testing.T) {
q := New(10)
_, err := q.Poll(1, time.Millisecond)
assert.IsType(t, ErrTimeout, err)
// it should not hang
q.Dispose()
_, err = q.Poll(1, time.Millisecond)
assert.IsType(t, ErrDisposed, err)
}
func TestGetPutDisposed(t *testing.T) {
q := New(10)
q.Dispose()
_, err := q.Get(1)
assert.IsType(t, ErrDisposed, err)
err = q.Put(`a`)
assert.IsType(t, ErrDisposed, err)
}
func BenchmarkQueue(b *testing.B) {
q := New(int64(b.N))
var wg sync.WaitGroup
wg.Add(1)
i := 0
go func() {
for {
q.Get(1)
i++
if i == b.N {
wg.Done()
break
}
}
}()
for i := 0; i < b.N; i++ {
q.Put(`a`)
}
_, err = q.Peek()
assert.Equal(t, ErrEmpty, err)
wg.Wait()
}
func TestQueueRemoveOutOfRangeErr(t *testing.T) {
q := NewQueue(defaultQueueLen)
func BenchmarkChannel(b *testing.B) {
ch := make(chan interface{}, 1)
var wg sync.WaitGroup
wg.Add(1)
i := 0
go func() {
for {
<-ch
i++
if i == b.N {
wg.Done()
break
}
}
}()
for i := 0; i < b.N; i++ {
ch <- `a`
}
wg.Wait()
}
_, err := q.Remove()
assert.Equal(t, ErrEmpty, err)
func TestPeek(t *testing.T) {
q := New(10)
q.Put(`a`)
q.Put(`b`)
q.Put(`c`)
peekResult, err := q.Peek()
peekExpected := `a`
assert.Nil(t, err)
assert.Equal(t, q.Len(), int64(3))
assert.Equal(t, peekExpected, peekResult)
q.Add(1)
_, err = q.Remove()
popResult, err := q.Get(1)
assert.Nil(t, err)
assert.Equal(t, peekResult, popResult[0])
assert.Equal(t, q.Len(), int64(2))
}
func TestPeekOnDisposedQueue(t *testing.T) {
q := New(10)
q.Dispose()
result, err := q.Peek()
assert.Nil(t, result)
assert.IsType(t, ErrDisposed, err)
}
func TestTakeUntil(t *testing.T) {
q := New(10)
q.Put(`a`, `b`, `c`)
result, err := q.TakeUntil(func(item interface{}) bool {
return item != `c`
})
if !assert.Nil(t, err) {
return
}
expected := []interface{}{`a`, `b`}
assert.Equal(t, expected, result)
}
func TestTakeUntilEmptyQueue(t *testing.T) {
q := New(10)
result, err := q.TakeUntil(func(item interface{}) bool {
return item != `c`
})
if !assert.Nil(t, err) {
return
}
expected := []interface{}{}
assert.Equal(t, expected, result)
}
func TestTakeUntilThenGet(t *testing.T) {
q := New(10)
q.Put(`a`, `b`, `c`)
takeItems, _ := q.TakeUntil(func(item interface{}) bool {
return item != `c`
})
restItems, _ := q.Get(3)
assert.Equal(t, []interface{}{`a`, `b`}, takeItems)
assert.Equal(t, []interface{}{`c`}, restItems)
}
func TestTakeUntilNoMatches(t *testing.T) {
q := New(10)
q.Put(`a`, `b`, `c`)
takeItems, _ := q.TakeUntil(func(item interface{}) bool {
return item != `a`
})
restItems, _ := q.Get(3)
assert.Equal(t, []interface{}{}, takeItems)
assert.Equal(t, []interface{}{`a`, `b`, `c`}, restItems)
}
func TestTakeUntilOnDisposedQueue(t *testing.T) {
q := New(10)
q.Dispose()
result, err := q.TakeUntil(func(item interface{}) bool {
return true
})
assert.Nil(t, result)
assert.IsType(t, ErrDisposed, err)
}
func TestWaiters(t *testing.T) {
s1, s2, s3, s4 := newSema(), newSema(), newSema(), newSema()
w := waiters{}
assert.Len(t, w, 0)
//
// test put()
w.put(s1)
assert.Equal(t, waiters{s1}, w)
w.put(s2)
w.put(s3)
w.put(s4)
assert.Equal(t, waiters{s1, s2, s3, s4}, w)
//
// test remove()
//
// remove from middle
w.remove(s2)
assert.Equal(t, waiters{s1, s3, s4}, w)
// remove non-existing element
w.remove(s2)
assert.Equal(t, waiters{s1, s3, s4}, w)
// remove from beginning
w.remove(s1)
assert.Equal(t, waiters{s3, s4}, w)
// remove from end
w.remove(s4)
assert.Equal(t, waiters{s3}, w)
// remove last element
w.remove(s3)
assert.Empty(t, w)
_, err = q.Remove()
assert.Equal(t, ErrEmpty, err)
// remove non-existing element
w.remove(s3)
assert.Empty(t, w)
//
// test get()
//
// start with 3 elements in list
w.put(s1)
w.put(s2)
w.put(s3)
assert.Equal(t, waiters{s1, s2, s3}, w)
// get() returns each item in insertion order
assert.Equal(t, s1, w.get())
assert.Equal(t, s2, w.get())
w.put(s4) // interleave a put(), item should go to the end
assert.Equal(t, s3, w.get())
assert.Equal(t, s4, w.get())
assert.Empty(t, w)
assert.Nil(t, w.get())
}
func TestExecuteInParallel(t *testing.T) {
q := New(10)
for i := 0; i < 10; i++ {
q.Put(i)
}
numCalls := uint64(0)
ExecuteInParallel(q, func(item interface{}) {
t.Logf("ExecuteInParallel called us with %+v", item)
atomic.AddUint64(&numCalls, 1)
})
assert.Equal(t, uint64(10), numCalls)
assert.True(t, q.Disposed())
}
func TestExecuteInParallelEmptyQueue(t *testing.T) {
q := New(1)
// basically just ensuring we don't deadlock here
ExecuteInParallel(q, func(interface{}) {
t.Fail()
})
}
func BenchmarkQueuePut(b *testing.B) {
numItems := int64(1000)
qs := make([]*Queue, 0, b.N)
for i := 0; i < b.N; i++ {
q := New(10)
qs = append(qs, q)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
q := qs[i]
for j := int64(0); j < numItems; j++ {
q.Put(j)
}
}
}
// General warning: Go's benchmark utility (go test -bench .) increases the number of
// iterations until the benchmarks take a reasonable amount of time to run; memory usage
// is *NOT* considered. On my machine, these benchmarks hit around ~1GB before they've had
// enough, but if you have less than that available and start swapping, then all bets are off.
func BenchmarkQueueGet(b *testing.B) {
numItems := int64(1000)
qs := make([]*Queue, 0, b.N)
for i := 0; i < b.N; i++ {
q := New(numItems)
for j := int64(0); j < numItems; j++ {
q.Put(j)
}
qs = append(qs, q)
}
b.ResetTimer()
func BenchmarkQueueSerial(b *testing.B) {
q := NewQueue(defaultQueueLen)
for i := 0; i < b.N; i++ {
q.Add(nil)
q := qs[i]
for j := int64(0); j < numItems; j++ {
q.Get(1)
}
}
}
func BenchmarkQueuePoll(b *testing.B) {
numItems := int64(1000)
qs := make([]*Queue, 0, b.N)
for i := 0; i < b.N; i++ {
q.Peek()
q.Remove()
q := New(numItems)
for j := int64(0); j < numItems; j++ {
q.Put(j)
}
qs = append(qs, q)
}
b.ResetTimer()
for _, q := range qs {
for j := int64(0); j < numItems; j++ {
q.Poll(1, time.Millisecond)
}
}
}
func BenchmarkQueueTickTock(b *testing.B) {
q := NewQueue(defaultQueueLen)
func BenchmarkExecuteInParallel(b *testing.B) {
numItems := int64(1000)
qs := make([]*Queue, 0, b.N)
for i := 0; i < b.N; i++ {
q := New(numItems)
for j := int64(0); j < numItems; j++ {
q.Put(j)
}
qs = append(qs, q)
}
var counter int64
fn := func(ifc interface{}) {
c := ifc.(int64)
atomic.AddInt64(&counter, c)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
q.Add(nil)
q.Peek()
q.Remove()
q := qs[i]
ExecuteInParallel(q, fn)
}
}