// LRUCache implementation [Java]

import java.util.HashMap;
import java.util.Map;

/**
 * A fixed-capacity LRU (least-recently-used) cache backed by a {@link HashMap}
 * for O(1) lookup and a doubly linked list (with sentinel head/tail nodes) for
 * O(1) recency updates and eviction.
 *
 * <p>Not thread-safe; callers must synchronize externally if shared.
 */
public class LRUCache<K, V> {

    /** Key -> node lookup for O(1) access. */
    private final Map<K, Node<K, V>> map = new HashMap<>();
    /** Sentinel head; {@code head.next} is the most recently used entry. */
    private final Node<K, V> head;
    /** Sentinel tail; {@code tail.pre} is the least recently used entry (next eviction candidate). */
    private final Node<K, V> tail;
    /** Maximum number of entries retained before eviction. */
    private final int capacity;
    /** Current number of entries stored. */
    private int size;

    /**
     * Creates a cache that evicts the least recently used entry once more than
     * {@code capacity} entries are stored.
     *
     * @param capacity maximum number of entries to retain
     */
    public LRUCache(int capacity) {
        this.head = new Node<>(null, null, null, null);
        this.tail = new Node<>(null, null, null, null);
        this.head.next = this.tail;
        this.tail.pre = this.head;
        this.capacity = capacity;
    }

    /** Doubly linked list node holding one cache entry. */
    static class Node<K, V> {
        private final K key;
        private V val;
        private Node<K, V> pre;
        private Node<K, V> next;

        Node(K key, V val, Node<K, V> pre, Node<K, V> next) {
            this.key = key;
            this.val = val;
            this.pre = pre;
            this.next = next;
        }
    }

    /**
     * Returns the value cached under {@code key}, marking it most recently
     * used, or {@code null} if the key is absent.
     *
     * @param key the key to look up
     * @return the cached value, or {@code null} if not present
     */
    public V get(K key) {
        Node<K, V> node = map.get(key);
        if (node == null) {
            return null;
        }
        moveToHead(node);
        return node.val;
    }

    /**
     * Associates {@code value} with {@code key}, marking the entry most
     * recently used. If inserting a new key pushes the cache over capacity,
     * the least recently used entry is evicted.
     *
     * @param key   the key to store under
     * @param value the value to cache
     * @return the previous value for {@code key}, or {@code null} if none
     */
    public V put(K key, V value) {
        Node<K, V> existing = map.get(key);
        if (existing != null) {
            V old = existing.val;
            existing.val = value;
            moveToHead(existing);
            return old;
        }
        Node<K, V> newNode = new Node<>(key, value, null, null);
        map.put(key, newNode);
        addToHead(newNode);
        drainOutIfNesesary();
        return null;
    }

    /**
     * Evicts the least recently used entry when the cache exceeds capacity.
     * (Name kept — typo included — for backward compatibility; it is public.)
     */
    public void drainOutIfNesesary() {
        if (size > capacity) {
            Node<K, V> lru = tail.pre; // LRU entry sits just before the tail sentinel
            map.remove(lru.key);
            lru.pre.next = tail;
            tail.pre = lru.pre;
            size--;
        }
    }

    /** Links {@code newNode} directly after the head sentinel (most recently used position). */
    public void addToHead(Node<K, V> newNode) {
        newNode.next = head.next;
        head.next.pre = newNode;
        head.next = newNode;
        newNode.pre = head;
        size++;
    }

    /**
     * Removes the entry for {@code key}, if present.
     *
     * @param key the key to remove
     * @return the removed value, or {@code null} if the key was absent
     */
    public V remove(K key) {
        Node<K, V> node = map.get(key);
        if (node == null) {
            return null;
        }
        // Unlink from the recency list, then drop the map entry.
        node.next.pre = node.pre;
        node.pre.next = node.next;
        map.remove(key);
        size--;
        return node.val;
    }

    /** Unlinks {@code node} from its current position and relinks it after the head sentinel. */
    private void moveToHead(Node<K, V> node) {
        node.next.pre = node.pre;
        node.pre.next = node.next;
        head.next.pre = node;
        node.next = head.next;
        head.next = node;
        node.pre = head;
    }

    /** Small demo of the cache behavior. */
    public static void main(String[] args) {
        LRUCache<String, Integer> cache = new LRUCache<>(3);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);
        cache.put("d", 4);
        cache.put("e", 5);
        print(cache);
        cache.remove("d");
        print(cache);
        cache.put("f", 6);
        cache.put("g", 7);
        print(cache);
        cache.get("f");
        print(cache);
        cache.put("e", 10);
        print(cache);
    }

    /** Prints cached values from most to least recently used. */
    static <K, V> void print(LRUCache<K, V> cache) {
        for (Node<K, V> cur = cache.head.next; cur != cache.tail; cur = cur.next) {
            System.out.print(cur.val + ",");
        }
        System.out.println();
    }
}

Redis LRU Cache implementation

If we use a HashMap together with a doubly linked list, we need extra storage for each node's next and prev pointers, which comes at the cost of a significant amount of memory.

  • volatile-lru — only keys with an expiration time set participate in the approximate LRU eviction policy
  • allkeys-lru — all keys participate in the approximate LRU eviction policy

Redis will select a fixed number of keys based on the maxmemory-samples configuration, compare their LRU access times, and then evict the least recently accessed key among them. The larger the value of maxmemory-samples, the closer Redis's approximate LRU algorithm gets to a strict LRU algorithm, but the higher the corresponding cost, which has some impact on performance. The default sample size is 5.