Cherry-pick #664 and #678 to tidb-6.1 (#680)
* Add a background region cache GC goroutine (#664)

* Add a background region cache GC goroutine

Signed-off-by: Yilin Chen <sticnarf@gmail.com>

* Change interval to 1s and regionPerRound to 50

Signed-off-by: Yilin Chen <sticnarf@gmail.com>

* Rewind after cacheGC reaches the end (#678)

Signed-off-by: Yilin Chen <sticnarf@gmail.com>

---------

Signed-off-by: Yilin Chen <sticnarf@gmail.com>
sticnarf authored Jan 31, 2023
1 parent 35a262e commit ba41798
Showing 3 changed files with 123 additions and 1 deletion.
63 changes: 63 additions & 0 deletions internal/locate/region_cache.go
@@ -304,6 +304,11 @@ func (r *Region) compareAndSwapStore(oldStore, newStore *regionStore) bool {
return atomic.CompareAndSwapPointer(&r.store, unsafe.Pointer(oldStore), unsafe.Pointer(newStore))
}

func (r *Region) isCacheTTLExpired(ts int64) bool {
lastAccess := atomic.LoadInt64(&r.lastAccess)
return ts-lastAccess > regionCacheTTLSec
}

func (r *Region) checkRegionCacheTTL(ts int64) bool {
// Only consider use percentage on this failpoint, for example, "2%return"
if _, err := util.EvalFailpoint("invalidateRegionCache"); err == nil {
@@ -397,6 +402,7 @@ func NewRegionCache(pdClient pd.Client) *RegionCache {
c.ctx, c.cancelFunc = context.WithCancel(context.Background())
interval := config.GetGlobalConfig().StoresRefreshInterval
go c.asyncCheckAndResolveLoop(time.Duration(interval) * time.Second)
go c.cacheGC()
c.enableForwarding = config.GetGlobalConfig().EnableForwarding
return c
}
@@ -1804,6 +1810,63 @@ func (c *RegionCache) UpdateBucketsIfNeeded(regionID RegionVerID, latestBucketsV
}
}

const cleanCacheInterval = time.Second
const cleanRegionNumPerRound = 50

// cacheGC is expected to run in a background goroutine. It keeps iterating
// over the whole region cache, searching for stale region info. It runs every
// cleanCacheInterval and checks only cleanRegionNumPerRound regions per round,
// so the impact of this background goroutine should be negligible.
func (c *RegionCache) cacheGC() {
ticker := time.NewTicker(cleanCacheInterval)
defer ticker.Stop()

beginning := newBtreeSearchItem([]byte(""))
iterItem := beginning
expired := make([]*btreeItem, cleanRegionNumPerRound)
for {
select {
case <-c.ctx.Done():
return
case <-ticker.C:
count := 0
expired = expired[:0]

// Only RLock when checking TTL to avoid blocking other readers
c.mu.RLock()
ts := time.Now().Unix()
c.mu.sorted.AscendGreaterOrEqual(iterItem, func(item_ btree.Item) bool {
item := item_.(*btreeItem)
if count > cleanRegionNumPerRound {
iterItem = item
return false
}
count++
if item.cachedRegion.isCacheTTLExpired(ts) {
expired = append(expired, item)
}
return true
})
c.mu.RUnlock()

// Reached the end of the region cache; start over from the beginning
if count <= cleanRegionNumPerRound {
iterItem = beginning
}

if len(expired) > 0 {
c.mu.Lock()
for _, item := range expired {
c.mu.sorted.Delete(item)
c.removeVersionFromCache(item.cachedRegion.VerID(), item.cachedRegion.GetID())
}
c.mu.Unlock()
}
}
}
}

// btreeItem is BTree's Item that uses []byte to compare.
type btreeItem struct {
key []byte
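The cacheGC loop above only holds the read lock while it scans for expired entries, takes the write lock just long enough to delete them, resumes from a saved cursor on the next tick, and rewinds once a round reaches the end of the B-tree. The standalone sketch below illustrates that bounded scan-and-evict pattern; it builds directly on github.com/google/btree but uses hypothetical types (entry, gcCache), so it is a simplified illustration of the idea rather than the client-go implementation.

package main

import (
	"bytes"
	"fmt"
	"sync"
	"time"

	"github.com/google/btree"
)

// entry is a hypothetical cache item keyed by a byte slice, with a unix-second
// access timestamp, loosely mirroring btreeItem/Region above.
type entry struct {
	key        []byte
	lastAccess int64
}

func (e *entry) Less(than btree.Item) bool {
	return bytes.Compare(e.key, than.(*entry).key) < 0
}

type gcCache struct {
	mu     sync.RWMutex
	sorted *btree.BTree
	ttlSec int64
}

// gcRound scans at most limit entries starting at cursor, deletes the expired
// ones, and returns the cursor for the next round (nil means the scan reached
// the end, so the caller should rewind to the beginning).
func (c *gcCache) gcRound(cursor btree.Item, limit int) btree.Item {
	var next btree.Item
	expired := make([]btree.Item, 0, limit)
	count := 0
	now := time.Now().Unix()

	// Hold only the read lock while checking TTLs so readers are not blocked.
	c.mu.RLock()
	c.sorted.AscendGreaterOrEqual(cursor, func(item btree.Item) bool {
		if count >= limit {
			next = item // resume from here on the next tick
			return false
		}
		count++
		if now-item.(*entry).lastAccess > c.ttlSec {
			expired = append(expired, item)
		}
		return true
	})
	c.mu.RUnlock()

	// Take the write lock only when there is actually something to delete.
	if len(expired) > 0 {
		c.mu.Lock()
		for _, item := range expired {
			c.sorted.Delete(item)
		}
		c.mu.Unlock()
	}
	return next
}

func main() {
	c := &gcCache{sorted: btree.New(32), ttlSec: 600}
	c.sorted.ReplaceOrInsert(&entry{key: []byte("a"), lastAccess: time.Now().Unix() - 3600})
	c.sorted.ReplaceOrInsert(&entry{key: []byte("b"), lastAccess: time.Now().Unix()})
	cursor := c.gcRound(&entry{key: []byte("")}, 50)
	fmt.Println(cursor == nil, c.sorted.Len()) // true 1: "a" expired and was removed
}

Collecting expired items first and deleting them under a separate write lock avoids mutating the B-tree while iterating over it, which is also why the diff above gathers items into the expired slice instead of deleting inside the ascend callback.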
57 changes: 57 additions & 0 deletions internal/locate/region_cache_test.go
@@ -40,6 +40,7 @@ import (
"fmt"
"math/rand"
"reflect"
"sync/atomic"
"testing"
"time"

@@ -1605,3 +1606,59 @@ func (s *testRegionCacheSuite) TestRemoveIntersectingRegions() {
s.Equal(loc.Region.GetID(), regions[0])
s.checkCache(1)
}

func (s *testRegionCacheSuite) TestBackgroundCacheGC() {
// Prepare 100 regions
regionCnt := 100
regions := s.cluster.AllocIDs(regionCnt)
regions = append([]uint64{s.region1}, regions...)
peers := [][]uint64{{s.peer1, s.peer2}}
for i := 0; i < regionCnt; i++ {
peers = append(peers, s.cluster.AllocIDs(2))
}
for i := 0; i < regionCnt; i++ {
s.cluster.Split(regions[i], regions[i+1], []byte(fmt.Sprintf(regionSplitKeyFormat, i)), peers[i+1], peers[i+1][0])
}
loadRegionsToCache(s.cache, regionCnt)
s.checkCache(regionCnt)

// Make parts of the regions stale
remaining := 0
s.cache.mu.Lock()
now := time.Now().Unix()
for verID, r := range s.cache.mu.regions {
if verID.id%3 == 0 {
atomic.StoreInt64(&r.lastAccess, now-regionCacheTTLSec-10)
} else {
remaining++
}
}
s.cache.mu.Unlock()

s.Eventually(func() bool {
s.cache.mu.RLock()
defer s.cache.mu.RUnlock()
return len(s.cache.mu.regions) == remaining
}, 3*time.Second, 200*time.Millisecond)
s.checkCache(remaining)

// Make another part of the regions stale
remaining = 0
s.cache.mu.Lock()
now = time.Now().Unix()
for verID, r := range s.cache.mu.regions {
if verID.id%3 == 1 {
atomic.StoreInt64(&r.lastAccess, now-regionCacheTTLSec-10)
} else {
remaining++
}
}
s.cache.mu.Unlock()

s.Eventually(func() bool {
s.cache.mu.RLock()
defer s.cache.mu.RUnlock()
return len(s.cache.mu.regions) == remaining
}, 3*time.Second, 200*time.Millisecond)
s.checkCache(remaining)
}
4 changes: 3 additions & 1 deletion internal/locate/region_request3_test.go
@@ -304,7 +304,9 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {

cache := NewRegionCache(s.cache.pdClient)
defer cache.Close()
cache.mu.Lock()
cache.insertRegionToCache(region)
cache.mu.Unlock()

// Verify creating the replicaSelector.
replicaSelector, err := newReplicaSelector(cache, regionLoc.Region, req)
@@ -537,7 +539,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
s.Nil(err)

// Test accessFollower state filtering epoch-stale stores.
region.lastAccess = time.Now().Unix()
atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
refreshEpochs(regionStore)
// Mark all followers as stale.
tiKVNum := regionStore.accessStoreNum(tiKVOnly)
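The last hunk switches the test from a plain assignment to atomic.StoreInt64 on region.lastAccess and wraps insertRegionToCache in the cache mutex, because the new cacheGC goroutine now reads lastAccess concurrently (via atomic.LoadInt64 in isCacheTTLExpired) and walks the B-tree in the background. Below is a minimal sketch of the paired atomic load/store pattern, with hypothetical names (region, touch, expired, ttlSec); running the unsynchronized plain-assignment version of this under the race detector (go test -race or go run -race) is what would flag it.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const ttlSec = 600 // hypothetical TTL, standing in for regionCacheTTLSec

// region is a hypothetical stand-in for a cached region whose access timestamp
// is shared between the request path and the background GC goroutine.
type region struct {
	lastAccess int64 // unix seconds; always accessed with atomic Load/Store
}

// touch is what the writer side (a request, or a test rewinding the clock) does.
func (r *region) touch(now int64) {
	atomic.StoreInt64(&r.lastAccess, now)
}

// expired is what the reader side (the background GC) checks, lock-free.
func (r *region) expired(now int64) bool {
	return now-atomic.LoadInt64(&r.lastAccess) > ttlSec
}

func main() {
	r := &region{}
	r.touch(time.Now().Unix() - ttlSec - 10)  // simulate a stale access, as the tests do
	fmt.Println(r.expired(time.Now().Unix())) // true: past the TTL, eligible for GC
}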
