0. Write first

Source reference: redis/src/evict.c, commit d1a005ab3963c16b65e805675a76f0e40c723158

In the process, you need to remind yourself that reading the source code is to learn the implementation details, but you can’t fall into the details. The analysis order is in accordance with the execution order, so as to avoid sticking large chunks of source code.

1. Implementation of Redis memory elimination strategy

Memory eviction policies decide what happens when memory usage exceeds the configured maxmemory. The options fall into a few families: do nothing / LRU algorithm / LFU algorithm / random / expiration time (TTL)

  • noeviction: never evicts any key

  • allkeys-lru: evicts among all keys using the LRU algorithm

  • volatile-lru: evicts among keys with an expiration time set, using the LRU algorithm

  • allkeys-random: evicts random keys among all keys

  • volatile-random: evicts random keys among those with an expiration time set

  • volatile-ttl: evicts keys that are about to expire (shortest TTL first)

  • allkeys-lfu: evicts among all keys using the LFU algorithm

  • volatile-lfu: evicts among keys with an expiration time set, using the LFU algorithm

1.1 freeMemoryIfNeededAndSafe: begin to free memory

The freeMemoryIfNeeded() function is executed when no Lua script is in a timeout state and not in the data loading state

/* Entry point that frees memory only when it is safe to do so: eviction
 * is skipped while a Lua script has timed out or while the server is
 * loading data, and delegated to freeMemoryIfNeeded() otherwise. */
int freeMemoryIfNeededAndSafe(void) {
    if (server.lua_timedout || server.loading) return C_OK;
    return freeMemoryIfNeeded();
}
Copy the code

1.2 freeMemoryIfNeeded: Clear memory according to the elimination policy

By default, slave nodes should ignore the maxMemory instruction and just do what slave nodes should do

if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK;
Copy the code

If the elimination strategy is noeviction, Redis will just say, “I’m doing my best.”

if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
        goto cant_free; /* We need to free memory, but policy forbids. */
Copy the code

Cant_free has the following two cases, and all it can do is check if the Lazyfree thread (presumably added by Redis V4) has any more tasks, and wait.

  • The first is the elimination strategynoeviction
  • The second is under the current elimination strategy, “nothing to free”
/* Reached when eviction cannot make progress: either the policy is
 * noeviction, or there was nothing left to evict. The only remaining
 * option is to wait for the lazy-free background thread to drain its
 * pending jobs, re-checking progress every millisecond. */
cant_free:
    while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
        /* Stop waiting as soon as lazy-freeing has reclaimed enough memory. */
        if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
            break;
        usleep(1000);
    }
    return C_ERR;
}
Copy the code

Use getMaxmemoryState (details omitted here) to compute how much memory (mem_tofree) must be freed, then select keys according to the configured policy and begin the cleanup

size_t mem_reported, mem_tofree, mem_freed;
if (getMaxmemoryState(&mem_reported,NULL,&mem_tofree,NULL) == C_OK)
    return C_OK;
mem_freed = 0

while (mem_freed < mem_tofree) {
    // The matched key
    sds bestkey = NULL;
    // The db where bestkey resides
    int bestdbid;
    // LRU algorithm /LFU algorithm /TTL expiration time
    if(server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) || server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {}// Random elimination
    else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM || server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)) {

    }
    // Delete the selected key
    if (bestkey) {
        // Get current memory
        delta = (long long) zmalloc_used_memory();
        // Whether the deletion is non-blocking
        if (server.lazyfree_lazy_eviction)
            dbAsyncDelete(db,keyobj);
        else
            dbSyncDelete(db,keyobj);
        // Calculate how much memory is freed
        delta -= (long long) zmalloc_used_memory();
        mem_freed += delta;
    } else {
        goto cant_free; /* nothing to free... * /}}Copy the code

1.3 Allkeys-random and volatile-random branches

When a random eviction policy is selected, every DB of the current Redis instance is traversed. For allkeys-random the full key space db->dict is sampled; for volatile-random only the set of keys with an expiration time, db->expires, is used. If dictSize(dict) is 0 for every DB, execution falls into the second cant_free case mentioned above

/* Walk the databases round-robin (next_db keeps the cursor across calls)
 * and pick one random key from the first non-empty candidate dict. */
for (i = 0; i < server.dbnum; i++) {
    j = (++next_db) % server.dbnum;
    db = server.db+j;
    // allkeys-random samples the whole key space; volatile-random only
    // samples keys that carry an expiration time.
    dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
            db->dict : db->expires;
    if (dictSize(dict) != 0) {
        de = dictGetRandomKey(dict);
        bestkey = dictGetKey(de);
        bestdbid = j;
        break;
    }
}
Copy the code

1.4 lfu/lru/TTL branch

The core field of the elimination strategy lies in the idle property

#define EVPOOL_SIZE 16              // number of entries kept in the sample pool
#define EVPOOL_CACHED_SDS_SIZE 255  // presumably the max key length kept in 'cached' — confirm in evict.c
// Sample set type: one eviction candidate, ranked by its 'idle' score
struct evictionPoolEntry {
    unsigned long long idle;    /* Object idle time (inverse frequency for LFU) */
    sds key;                    /* Key name. */
    sds cached;                 /* Cached SDS object for key name. */
    int dbid;                   /* Key DB number. */
};
// Global eviction pool, reused across eviction passes
static struct evictionPoolEntry *EvictionPoolLRU;
Copy the code

Initialize the sample set, which has nothing to do with LRU/LFU/TTL. The core is the evictionPoolPopulate function

// Initialize the sample set
struct evictionPoolEntry *pool = EvictionPoolLRU;

while (bestkey == NULL) {
    unsigned long total_keys = 0, keys;
    // Update/maintain the pool sample set through each DB
    for (i = 0; i < server.dbnum; i++) {
        db = server.db+i;
        dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ? db->dict : db->expires;
        if((keys = dictSize(dict)) ! =0) {
            // Four parameters can be interpreted as: dbid, candidate set (db->dict/db-> Expires), complete set (DB ->dict), sample setevictionPoolPopulate(i, dict, db->dict, pool); total_keys += keys; }}// There is no key to eliminate
    if(! total_keys)break;
    // Traverse backwards and reset the sample set
    for (k = EVPOOL_SIZE- 1; k >= 0; k--) {
        if (pool[k].key == NULL) continue;
        bestdbid = pool[k].dbid;
       
        if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
            de = dictFind(server.db[pool[k].dbid].dict,pool[k].key);
        } else {
            de = dictFind(server.db[pool[k].dbid].expires,pool[k].key);
        }
        pool[k].key = NULL;
        pool[k].idle = 0;

        // Find bestkey, break
        if (de) {
            bestkey = dictGetKey(de);
            break; }}}Copy the code

1.5 evictionPoolPopulate: LRU/LFU algorithm

First, according to the Maxmemory_samples configuration, select a certain number of samples, which defaults to 5. The higher the value, the closer it is to the real LRU/LFU algorithm. The lower the value, the higher the performance, so balance is required

count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
for (j = 0; j < count; j++) {
    // See the next code block
}
Copy the code

The idle value of each sample is calculated according to the policy. The higher the value is, the higher the matching degree is, and the priority is deleted

  • volatile-ttl idle calculation: keys expiring soonest get the largest score, so they sit at the tail of the pool and are evicted first
  • lru calculation: keys that have gone unaccessed the longest get the largest score
  • lfu calculation: keys with the lowest access frequency get the largest score; on equal frequency, the key unaccessed for longer wins
if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
    // LRU: idle = milliseconds since last access; older access => larger score.
    idle = estimateObjectIdleTime(o);
} else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
    // LFU: invert the 0-255 frequency counter so that a *lower* access
    // frequency yields a *larger* "idle" score.
    idle = 255-LFUDecrAndReturn(o);
} else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
    // TTL: dictGetVal(de) holds the expire time, so an earlier expiration
    // produces a larger score.
    idle = ULLONG_MAX - (long)dictGetVal(de);
} else {
    serverPanic("Unknown eviction policy in evictionPoolPopulate()");
}
// Then maintain the sample set
Copy the code

1.6 Implementation of Redis LRU

Redis maintains a 24bit global clock, which can be simply interpreted as the current system timestamp. This clock is updated at regular intervals

/* server.h, line 595 */
#define LRU_BITS 24  // width of the per-object LRU clock, in bits
#define LRU_CLOCK_MAX ((1<<LRU_BITS)-1) // maximum value of the LRU clock
#define LRU_CLOCK_RESOLUTION 1000 // Clock precision, milliseconds

/* server.h, line 1017 (excerpt) */
struct redisServer {
    // Clock for LRU eviction, refreshed at regular intervals
    _Atomic unsigned int lruclock;
};
Copy the code

Each key object also maintains a 24bit clock, which is referred to as O -> LRU

/ / server. H 600 lines
typedef struct redisObject {
    // LRU time (relative to global lru_clock)
    unsigned lru:LRU_BITS
}
Copy the code

Set the global LRUClock. When the value of lRUClock exceeds LRU_CLOCK_MAX, it will be calculated from the beginning

/* src/evict.c, line 70 */
/* Returns the current time in LRU-clock units: milliseconds divided by
 * LRU_CLOCK_RESOLUTION, truncated to LRU_BITS via the LRU_CLOCK_MAX mask,
 * so the value wraps around once it exceeds LRU_CLOCK_MAX. */
unsigned int getLRUClock(void) {
    return (mstime()/LRU_CLOCK_RESOLUTION) & LRU_CLOCK_MAX;
}
Copy the code

Set o->lru to assign the system clock to the internal object clock when a new key object is added

/* src/evict.c, line 78 */
/* Returns the LRU clock used to stamp objects on access. When the server
 * cron runs at least as often as the clock resolution
 * (1000/server.hz <= LRU_CLOCK_RESOLUTION) the cached server.lruclock is
 * fresh enough; otherwise compute the clock on the spot. */
unsigned int LRU_CLOCK(void) {
    unsigned int lruclock;
    if (1000/server.hz <= LRU_CLOCK_RESOLUTION) {
        lruclock = server.lruclock;
    } else {
        lruclock = getLRUClock();
    }
    return lruclock;
}
Copy the code

For example, to do LRU now, you need to compare LRUClock and O -> LRU, which have two cases

  • lruclock >= o->lru: This is the usual case
  • lruclock < o->lru: When the value of lRUClock exceeds the value of LRU_CLOCK_MAX, the calculation is done from the beginning, so this happens and extra time is needed to calculate
/ / SRC/evict. 88 lines of c
unsigned long long estimateObjectIdleTime(robj *o) {
    unsigned long long lruclock = LRU_CLOCK();
    if (lruclock >= o->lru) {
        return (lruclock - o->lru) * LRU_CLOCK_RESOLUTION;
    } else {
        return(lruclock + (LRU_CLOCK_MAX - o->lru)) * LRU_CLOCK_RESOLUTION; }}Copy the code

Redis LFU implementation can be described as obscure, boring, better to do leetcode

2. LRU Golang implementation

package problem0146

// Doubly linked list node: holds a cached key/value pair plus its
// predecessor (pre) and successor (next) links.
type LinkNode struct {
	key, value int
	pre, next  *LinkNode
}

// LRU cache structure: m maps keys to their list nodes, cap is the
// maximum number of entries, and head/tail are sentinel nodes so that
// list surgery never needs nil checks. head.next is the most recently
// used entry, tail.pre the least recently used.
type LRUCache struct {
	m          map[int]*LinkNode
	cap        int
	head, tail *LinkNode
}

// Constructor builds an empty LRUCache with the given capacity.
func Constructor(capacity int) LRUCache {
	head := &LinkNode{0, 0, nil, nil}
	tail := &LinkNode{0, 0, nil, nil}
	head.next = tail
	tail.pre = head
	return LRUCache{make(map[int]*LinkNode, capacity), capacity, head, tail}
}

// moveToHead promotes node to the most-recently-used position.
func (this *LRUCache) moveToHead(node *LinkNode) {
	this.removeNode(node)
	this.addNode(node)
}

// removeNode unlinks node from the list.
func (this *LRUCache) removeNode(node *LinkNode) {
	node.pre.next = node.next
	node.next.pre = node.pre
}

// addNode inserts node right after the head sentinel (MRU slot).
func (this *LRUCache) addNode(node *LinkNode) {
	head := this.head
	node.next = head.next
	node.next.pre = node
	node.pre = head
	head.next = node
}

// Get returns the value stored under key, moving the entry to the front
// (most recently used); it returns -1 when the key is absent.
func (this *LRUCache) Get(key int) int {
	if v, exist := this.m[key]; exist {
		this.moveToHead(v)
		return v.value
	}
	return -1
}

// Put inserts or updates key. An existing entry is updated in place and
// moved to the front; a new entry is inserted at the front, evicting the
// least recently used entry (tail.pre) when the cache is full. A
// non-positive capacity means nothing can be stored, so Put is a no-op
// (previously this case unlinked the head sentinel and panicked).
func (this *LRUCache) Put(key int, value int) {
	if this.cap <= 0 {
		return
	}
	if v, exist := this.m[key]; exist {
		v.value = value
		this.moveToHead(v)
		return
	}
	if len(this.m) >= this.cap {
		lru := this.tail.pre
		delete(this.m, lru.key)
		this.removeNode(lru)
	}
	node := &LinkNode{key, value, nil, nil}
	this.addNode(node)
	this.m[key] = node
}
Copy the code

3. LFU Golang implementation

package problem0460

// LRU: Least Recently Used — evict the entry that has gone unused for the
// longest time before inserting a new one.
// LFU: Least Frequently Used — evict the entry with the fewest accesses;
// when frequencies tie, evict the least recently used among them.

// Node: contains key, value, frequent (access count), pre predecessor
// pointer, and next successor pointer.
type Node struct {
	key, value, frequent int
	pre, next            *Node
}

// ListNode: a doubly linked list with head/tail sentinel pointers and an
// element count (size).
type ListNode struct {
	head, tail *Node
	size       int
}

// addNode inserts node right after the head sentinel.
func (this *ListNode) addNode(node *Node) {
	head := this.head
	node.next = head.next
	node.next.pre = node
	node.pre = head
	head.next = node
}

// removeNode unlinks node from the list.
func (this *ListNode) removeNode(node *Node) {
	node.pre.next = node.next
	node.next.pre = node.pre
}

// LFUCache: capacity is the maximum size, size the current entry count,
// minFrequent the lowest access frequency currently present, cacheMap maps
// keys to nodes, frequentMap maps each frequency to its list of nodes.
// minFrequent maintenance:
//  1. inserting a brand-new node (never accessed before) resets it to 1;
//  2. when a get/put empties the list that happens to be the
//     minimum-frequency list, minFrequent is incremented.
type LFUCache struct {
	capacity, size, minFrequent int
	cacheMap                    map[int]*Node
	frequentMap                 map[int]*ListNode
}

// Constructor creates an empty LFUCache with the given capacity.
func Constructor(capacity int) LFUCache {
	return LFUCache{
		capacity:    capacity,
		size:        0,
		minFrequent: 0,
		cacheMap:    make(map[int]*Node),
		frequentMap: make(map[int]*ListNode),
	}
}

// remove detaches node from the list of its current frequency.
func (this *LFUCache) remove(node *Node) {
	list := this.frequentMap[node.frequent]
	list.removeNode(node)
	list.size--
}

// add links node into the list for its frequency, lazily creating the
// sentinel-bracketed list on first use.
func (this *LFUCache) add(node *Node) {
	list, exist := this.frequentMap[node.frequent]
	if !exist {
		list = &ListNode{&Node{}, &Node{}, 0}
		list.head.next = list.tail
		list.tail.pre = list.head
		this.frequentMap[node.frequent] = list
	}
	list.addNode(node)
	list.size++
}

// evictNode removes the least frequently used entry: the oldest node
// (tail.pre) of the minimum-frequency list.
func (this *LFUCache) evictNode() {
	list := this.frequentMap[this.minFrequent]
	victim := list.tail.pre
	delete(this.cacheMap, victim.key)
	list.removeNode(victim)
	list.size--
}

// triggerVisit records one access to node:
//  1. remove it from its current frequency list;
//  2. bump minFrequent if that list was the minimum and is now empty;
//  3. re-insert the node at frequency+1.
func (this *LFUCache) triggerVisit(node *Node) {
	this.remove(node)
	if node.frequent == this.minFrequent && this.frequentMap[node.frequent].size == 0 {
		this.minFrequent++
	}
	node.frequent++
	this.add(node)
}

// Get returns the value stored under key (counting as an access), or -1
// when the key is absent.
func (this *LFUCache) Get(key int) int {
	if node, exist := this.cacheMap[key]; exist {
		this.triggerVisit(node)
		return node.value
	}
	return -1
}

// Put inserts or updates key. Updating an existing key counts as an
// access. A new key starts with frequency 1 (so minFrequent resets to 1),
// evicting the LFU entry first when the cache is full. A non-positive
// capacity ignores all writes.
func (this *LFUCache) Put(key int, value int) {
	if this.capacity <= 0 {
		return
	}
	if node, exist := this.cacheMap[key]; exist {
		this.triggerVisit(node)
		node.value = value
		return
	}
	if this.size >= this.capacity {
		this.evictNode()
	} else {
		this.size++
	}
	newNode := &Node{key, value, 1, nil, nil}
	this.add(newNode)
	this.minFrequent = 1
	this.cacheMap[key] = newNode
}
Copy the code