tests cleanup #9

Merged
merged 8 commits on Feb 8, 2021
3 changes: 3 additions & 0 deletions task.go
@@ -34,10 +34,13 @@ type task struct {
// Ensure that task implements the Task interface.
var _ Task = &task{}

// String implements fmt.Stringer for task.
func (t *task) String() string {
return t.id
}

// Err returns the error resulting from processing the task. Together with
// String, it makes task satisfy the Task interface.
func (t *task) Err() error {
return t.err
}
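As background for readers skimming the diff: var _ Task = &task{} is the standard Go compile-time assertion that a type satisfies an interface; the build breaks if the type ever stops implementing it. Below is a minimal, self-contained sketch of the pattern. The two-method Task interface shown here is an assumption for illustration, not necessarily the repo's full interface.

package main

import "fmt"

// Task is a stand-in for the repo's interface; its exact shape is assumed.
type Task interface {
	fmt.Stringer
	Err() error
}

type task struct {
	id  string
	err error
}

// Compile-time assertion: compilation fails if *task stops satisfying Task.
var _ Task = &task{}

func (t *task) String() string { return t.id }
func (t *task) Err() error     { return t.err }

func main() {
	var tk Task = &task{id: "task #1"}
	fmt.Printf("%s: err=%v\n", tk, tk.Err()) // task #1: err=<nil>
}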
145 changes: 108 additions & 37 deletions workerpool_test.go
@@ -15,24 +15,108 @@
package workerpool

import (
"context"
"fmt"
"runtime"
"sync"
"testing"
"time"
)

func TestWorkerPoolTasksCapacity(t *testing.T) {
wp := New(runtime.NumCPU())
defer wp.Close()

if c := cap(wp.tasks); c != 0 {
t.Errorf("tasks channel capacity is %d; want 0 (an unbuffered channel)", c)
}
}
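For context on this assertion: cap on a channel reports its buffer size, and an unbuffered channel always reports 0, which is exactly what the test above equates with "unbuffered". A tiny standalone illustration:

package main

import "fmt"

func main() {
	unbuffered := make(chan struct{})
	buffered := make(chan struct{}, 8)

	// cap reports the buffer size; 0 means every send blocks
	// until a receiver is ready to take the value.
	fmt.Println(cap(unbuffered)) // 0
	fmt.Println(cap(buffered))   // 8
}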

func TestWorkerPoolCap(t *testing.T) {
one := New(1)
defer one.Close()
if c := one.Cap(); c != 1 {
t.Errorf("got %d; want %d", c, 1)
}

n := runtime.NumCPU()
ncpu := New(n)
defer ncpu.Close()
if c := ncpu.Cap(); c != n {
t.Errorf("got %d; want %d", c, n)
}

fortyTwo := New(42)
defer fortyTwo.Close()
if c := fortyTwo.Cap(); c != 42 {
t.Errorf("got %d; want %d", c, 42)
}
}

// TestWorkerPoolConcurrentTasksCount ensures that exactly n workers are
// running in the pool when more than n tasks are submitted.
func TestWorkerPoolConcurrentTasksCount(t *testing.T) {
n := runtime.NumCPU()
wp := New(n)
defer wp.Close()

// working is written to by each task as soon as possible.
working := make(chan struct{})

ctx, cancel := context.WithCancel(context.Background())
// cleanup is a bit tricky. We need to free up all tasks that will attempt
// to write to the working channel.
defer func() {
// call cancel first to ensure that no worker is waiting on the
// context.
cancel()
// all remaining tasks can now only block on writing to the working
// channel, so drain them all.
for {
select {
case <-working:
case <-time.After(100 * time.Millisecond):
return
}
}
}()

// NOTE: schedule one more task than we have workers, hence n+1.
for i := 0; i < n+1; i++ {
id := fmt.Sprintf("task #%2d", i)
err := wp.Submit(id, func() error {
working <- struct{}{}
<-ctx.Done()
return nil
})
if err != nil {
t.Fatalf("failed to submit task '%s': %v", id, err)
}
}

// ensure that n workers are busy.
for i := 0; i < n; i++ {
select {
case <-working:
case <-time.After(100 * time.Millisecond):
t.Fatalf("got %d tasks running; want %d", i, n)
}
}

// ensure that one task is not scheduled, as all workers should now be
// waiting on the context.
select {
case <-working:
t.Fatalf("got %d tasks running; want %d", n+1, n)
case <-time.After(100 * time.Millisecond):
}
}
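The cancel-then-drain cleanup in the deferred function above deserves a note: the order matters. cancel releases the workers parked on <-ctx.Done(), and the drain loop then absorbs any task still blocked sending on working, returning once the channel has been quiet for 100ms. A stripped-down sketch of the same pattern with a single stand-in worker:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	working := make(chan struct{})

	// A stand-in worker: report in, then park on the context,
	// just like the tasks submitted in the test above.
	go func() {
		working <- struct{}{}
		<-ctx.Done()
	}()

	<-working // the worker is now busy

	// Cleanup mirrors the deferred function: cancel first so no worker
	// stays parked, then drain until the channel goes quiet.
	cancel()
	for {
		select {
		case <-working:
		case <-time.After(100 * time.Millisecond):
			fmt.Println("drained")
			return
		}
	}
}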

func TestWorkerPool(t *testing.T) {
n := runtime.NumCPU()
wp := New(n)

numTasks := n + 2
done := make(chan struct{})
// working is used to ensure that n routines are dispatched at a given time
@@ -116,18 +200,6 @@ func TestWorkerPool(t *testing.T) {
if err := wp.Close(); err != nil {
t.Errorf("close: got '%v', want no error", err)
}
}

func TestConcurrentDrain(t *testing.T) {
@@ -209,24 +281,23 @@ func TestConcurrentDrain(t *testing.T) {
}
}

func TestWorkerPoolDrainAfterClose(t *testing.T) {
wp := New(runtime.NumCPU())
wp.Close()
tasks, err := wp.Drain()
if err != ErrClosed {
t.Errorf("got %v; want %v", err, ErrClosed)
}
if tasks != nil {
t.Errorf("got %v as tasks; want %v", tasks, nil)
}
}

func TestWorkerPoolSubmitAfterClose(t *testing.T) {
wp := New(runtime.NumCPU())
wp.Close()
if err := wp.Submit("dummy", nil); err != ErrClosed {
t.Fatalf("got %v; want %v", err, ErrClosed)
}
}
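Together, these two tests pin down the after-Close contract: Submit and Drain both return ErrClosed, and Drain yields no tasks. The sketch below shows one way a pool could enforce that contract; the pool struct, its closed flag, and the Drain signature are assumptions made for illustration, not the repo's actual implementation (which must also be goroutine-safe):

package main

import (
	"errors"
	"fmt"
)

// ErrClosed stands in for the sentinel the tests compare against.
var ErrClosed = errors.New("workerpool: closed")

type pool struct{ closed bool }

func (p *pool) Close() error {
	if p.closed {
		return ErrClosed
	}
	p.closed = true
	return nil
}

func (p *pool) Submit(id string, f func() error) error {
	if p.closed {
		return ErrClosed
	}
	// ... enqueue the task here ...
	return nil
}

func (p *pool) Drain() ([]string, error) {
	if p.closed {
		return nil, ErrClosed
	}
	// ... collect finished tasks here ...
	return nil, nil
}

func main() {
	p := &pool{}
	fmt.Println(p.Close())              // <nil>
	fmt.Println(p.Close())              // workerpool: closed
	fmt.Println(p.Submit("dummy", nil)) // workerpool: closed
	_, err := p.Drain()
	fmt.Println(err) // workerpool: closed
}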

@@ -239,10 +310,10 @@ func TestWorkerPoolManyClose(t *testing.T) {
}

// calling Close() more than once should always return an error.
if err := wp.Close(); err != ErrClosed {
t.Fatalf("got %v; want %v", err, ErrClosed)
}
if err := wp.Close(); err != ErrClosed {
t.Fatalf("got %v; want %v", err, ErrClosed)
}
}
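One detail worth calling out in this last hunk: the PR replaces errors.Is(err, ErrClosed) with a direct err != ErrClosed comparison. Direct equality works as long as the pool returns the sentinel unwrapped, but it stops matching the moment anyone wraps the error with %w, which errors.Is would still catch:

package main

import (
	"errors"
	"fmt"
)

var ErrClosed = errors.New("workerpool: closed")

func main() {
	wrapped := fmt.Errorf("close: %w", ErrClosed)

	// errors.Is walks the wrap chain; == compares only the top value.
	fmt.Println(errors.Is(wrapped, ErrClosed)) // true
	fmt.Println(wrapped == ErrClosed)          // false
}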