Skip to content
JZLeetCode
Go back

LeetCode 146 LintCode 134 LRU Cache

Table of contents

Open Table of contents

Description

Question Links: LeetCode 146, LintCode 134

Design a data structure that follows the constraints of the Least Recently Used (LRU) cache.

Implement the LRUCache class:

The functions get and put must each run in O(1) average time complexity.

Example 1:

Input
["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"]
[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
Output
[null, null, null, 1, null, -1, null, -1, 3, 4]

Explanation
LRUCache lRUCache = new LRUCache(2);
lRUCache.put(1, 1); // cache is {1=1}
lRUCache.put(2, 2); // cache is {1=1, 2=2}
lRUCache.get(1);    // return 1
lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3}
lRUCache.get(2);    // returns -1 (not found)
lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3}
lRUCache.get(1);    // return -1 (not found)
lRUCache.get(3);    // return 3
lRUCache.get(4);    // return 4

Constraints:

Solution

Idea

We can use a doubly linked list and a hash map so that both the put and get operations can be O(1) time complexity. We cannot use Java standard library’s LinkedList since remove is O(n) time complexity because we need to locate the node in the linked list.

As far as I know, there is no standard library implementation of a doubly or singly linked list node. Java has LinkedList and C++ has std::list, and in both the node data structure is encapsulated. Maybe the data structure itself is so trivial to write that no standard library bothers to provide it.

Wait for the Rust solution — it will be fun.

Complexity: Time O(1), Space O(n).

Java

public class LRUCache {
    // Hash map + doubly linked list: both get and put run in O(1).
    // head/tail are sentinel nodes that hold no key/value; head.next is the
    // most recently used entry and tail.pre the least recently used one.
    Map<Integer, Node> cache; // key -> node
    int cnt;                  // current number of entries
    int capacity;             // maximum number of entries
    Node head, tail;          // sentinel nodes

    /** Creates an empty cache that holds at most {@code capacity} entries. */
    public LRUCache(int capacity) {
        this.cnt = 0;
        this.capacity = capacity;
        cache = new HashMap<>();
        head = new Node();
        tail = new Node();
        head.next = tail;
        tail.pre = head;
    }

    /** Links {@code node} right after the head sentinel, marking it most recently used. */
    private void addToHead(Node node) {
        node.pre = head;
        node.next = head.next;
        head.next.pre = node;
        head.next = node;
    }

    /** Unlinks {@code node} from the list in O(1); the node keeps its own links. */
    private void removeNode(Node node) {
        node.pre.next = node.next;
        node.next.pre = node.pre;
    }

    /** Moves an already-linked {@code node} to the most-recently-used position. */
    private void moveToHead(Node node) {
        removeNode(node);
        addToHead(node);
    }

    /** Unlinks and returns the least recently used node (the one before the tail sentinel). */
    private Node popTail() {
        Node res = tail.pre;
        removeNode(res);
        return res;
    }

    /**
     * Returns the value for {@code key}, or -1 if absent.
     * A hit promotes the key to most recently used.
     */
    public int get(int key) {
        Node node = cache.get(key);
        if (node == null) return -1;
        moveToHead(node);
        return node.v;
    }

    /**
     * Inserts or updates {@code key}; evicts the least recently used entry
     * once the cache exceeds its capacity.
     */
    public void put(int key, int value) {
        Node node = cache.get(key);
        if (node == null) {
            Node newNode = new Node(key, value);
            cache.put(key, newNode);
            addToHead(newNode);
            ++cnt;
            if (cnt > capacity) {
                // Renamed from 'tail' — the original local shadowed the tail sentinel field.
                Node lru = popTail();
                cache.remove(lru.k);
                --cnt;
            }
        } else {
            node.v = value;
            moveToHead(node);
        }
    }

    /** Doubly linked list node storing one key/value pair. */
    static class Node {
        int k;
        int v;
        Node pre;
        Node next;

        Node(int k, int v) {
            this.k = k;
            this.v = v;
        }

        Node() {
        }
    }
}

Python

def delete(node):
    """Unlink ``node`` from its doubly linked list in O(1)."""
    before, after = node.pre, node.nex
    before.nex = after
    after.pre = before


class LRUCache:
    """LRU cache backed by a hash map plus a doubly linked list.

    ``head``/``tail`` are sentinel nodes: ``head.nex`` is the most recently
    used entry and ``tail.pre`` the least recently used one.  Both ``get``
    and ``put`` run in O(1).
    """

    def __init__(self, capacity: int):
        self.head, self.tail = Node(), Node()
        self.head.nex = self.tail
        # Bug fix: this line used to assign ``self.tail.prev`` — a typo for
        # ``pre`` (the attribute Node defines and pop_tail reads).  It only
        # worked because add_front repairs ``tail.pre`` before any eviction.
        self.tail.pre = self.head
        self.kn = dict()   # key -> Node
        self.cnt = 0       # current number of entries
        self.cap = capacity

    def get(self, key: int) -> int:
        """Return the value for ``key`` (promoting it to MRU) or -1 if absent."""
        if key in self.kn:
            self.move_front(self.kn[key])
            return self.kn[key].v
        else:
            return -1

    def put(self, key: int, value: int) -> None:
        """Insert or update ``key``; evict the LRU entry when over capacity."""
        if key in self.kn:
            self.kn[key].v = value
            self.move_front(self.kn[key])
        else:
            node = Node(k=key, v=value)
            self.add_front(node)
            self.cnt += 1
            self.kn[key] = node
            if self.cnt > self.cap:
                tmp = self.pop_tail()
                del self.kn[tmp.k]
                self.cnt -= 1

    def pop_tail(self):
        """Unlink and return the least recently used node."""
        tmp = self.tail.pre
        delete(tmp)
        return tmp

    def add_front(self, node):
        """Link ``node`` right after the head sentinel (mark it as MRU)."""
        self.head.nex.pre = node
        node.nex = self.head.nex
        node.pre = self.head
        self.head.nex = node

    def move_front(self, node):
        """Move an already-linked node to the MRU position."""
        delete(node)
        self.add_front(node)


class Node:
    """Doubly linked list node holding one key/value pair."""

    def __init__(self, k=0, v=0, pre=None, nex=None):
        self.k, self.v = k, v
        self.pre, self.nex = pre, nex

C++

C++‘s std::list is a doubly linked list and exposes O(1) splicing, which lets us pair it with an unordered_map<int, list<...>::iterator> for an idiomatic O(1) get/put.

class LRUCache {
    // std::list gives O(1) splice; the map stores an iterator to each key's
    // node, so both get and put run in O(1).  Names are std::-qualified so
    // the class no longer depends on LeetCode's implicit `using namespace std;`.
    int cap_;                                                   // maximum number of entries
    std::list<std::pair<int, int>> items_;                      // (key, value); front = MRU, back = LRU
    std::unordered_map<int, std::list<std::pair<int, int>>::iterator> index_;  // key -> node

public:
    explicit LRUCache(int capacity) : cap_(capacity) {}

    // Returns the value for `key` or -1 if absent; a hit promotes the key to MRU.
    int get(int key) {
        auto it = index_.find(key);
        if (it == index_.end()) return -1;
        items_.splice(items_.begin(), items_, it->second);      // promote in O(1); iterator stays valid
        return it->second->second;
    }

    // Inserts or updates `key`, evicting the LRU entry when at capacity.
    void put(int key, int value) {
        auto it = index_.find(key);
        if (it != index_.end()) {
            it->second->second = value;
            items_.splice(items_.begin(), items_, it->second);
            return;
        }
        // `>=` (rather than `==`) plus the empty check also guards a
        // non-positive capacity, which previously called back()/pop_back()
        // on an empty list — undefined behavior.
        if (static_cast<int>(items_.size()) >= cap_) {
            if (items_.empty()) return;                         // capacity <= 0: store nothing
            index_.erase(items_.back().first);
            items_.pop_back();
        }
        items_.emplace_front(key, value);
        index_[key] = items_.begin();
    }
};

Rust

In Rust, sharing a node between the linked list (which sets prev/next) and the hash map (which holds another reference) requires reference counting. We use Rc<RefCell<Node>> for shared ownership with interior mutability, and Weak<RefCell<Node>> for the prev link to break the strong-reference cycle that would otherwise leak the list.

// Shared, mutable node handle: Rc for shared ownership (list + map), RefCell
// for interior mutability of the links.
type NodePtr = Rc<RefCell<Node>>;
// Non-owning handle used for back-links so the list holds no strong cycles.
type WeakNodePtr = Weak<RefCell<Node>>;

/// Doubly linked list node shared between the list and the hash map.
struct Node {
    key: i32,
    value: i32,
    prev: Option<WeakNodePtr>,  // weak to avoid cycles
    next: Option<NodePtr>,
}

/// LRU cache: a hash map for O(1) key lookup plus a doubly linked list
/// (head = MRU, tail = LRU) for O(1) recency reordering.
struct LRUCache {
    capacity: usize,
    map: HashMap<i32, NodePtr>,
    head: Option<NodePtr>,      // MRU end
    tail: Option<NodePtr>,      // LRU end
}

impl LRUCache {
    /// Creates an empty cache holding at most `capacity` entries.
    fn new(capacity: i32) -> Self {
        Self { capacity: capacity as usize, map: HashMap::new(), head: None, tail: None }
    }

    /// Returns the value for `key`, or -1 if absent.
    /// A hit is re-linked to the front of the list (most recently used).
    fn get(&mut self, key: i32) -> i32 {
        if let Some(node) = self.map.get(&key) {
            let node = Rc::clone(node);
            self.remove(&node);
            self.push_front(&node);
            // Copy the value out before returning so the RefCell borrow ends here.
            let v = node.borrow().value;
            v
        } else {
            -1
        }
    }

    /// Inserts or updates `key`; evicts the least recently used entry
    /// (popped from the back of the list) when over capacity.
    fn put(&mut self, key: i32, value: i32) {
        if let Some(node) = self.map.get(&key) {
            let node = Rc::clone(node);
            node.borrow_mut().value = value;
            self.remove(&node);
            self.push_front(&node);
        } else {
            let node = Rc::new(RefCell::new(Node { key, value, prev: None, next: None }));
            self.push_front(&node);
            self.map.insert(key, node);
            if self.map.len() > self.capacity {
                if let Some(evicted) = self.pop_back() {
                    self.map.remove(&evicted.borrow().key);
                }
            }
        }
    }
    // push_front, remove, pop_back follow the standard doubly-linked list pattern;
    // see crates/leet/src/list/lru_cache.rs for the full source.
}

See LeetCode Tree Node and LinkedList in Rust for a deeper explanation of Option<Rc<RefCell<...>>>.

Share this post on:

Previous Post
Fun Bit Tricks (Manipulations)
Next Post
LeetCode 487 LintCode 883 Max Consecutive Ones II