LRU Cache Design

Date: 2010-03-23 22:48:54

Tags: c++ algorithm data-structures lru

A Least Recently Used (LRU) cache discards the least recently used items first. How would you design and implement such a cache class? The design requirements are:

1) Find items as fast as possible.

2) Once the cache misses and the cache is full, we need to replace the least recently used item as fast as possible.

How would you analyze and implement this problem in terms of design patterns and algorithm design?

12 Answers:

Answer 0 (Score: 93)

A linked list plus a hash table of pointers to the linked list nodes is the usual way to implement an LRU cache. This gives O(1) operations (assuming a decent hash). The advantage of this (being O(1)): you can do a multithreaded version by simply locking the whole structure. You don't have to worry about granular locking, etc.

In short, the way it works:

When a value is accessed, move the corresponding node in the linked list to the head.

When you need to remove a value from the cache, remove it from the tail end.

When adding a value to the cache, just place it at the head of the linked list.

Thanks to doublep, here is a site with a C++ implementation: Miscellaneous Container Templates.
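
As a minimal sketch of the coarse-locking point above (illustrative only, assuming the wrapped single-threaded cache exposes put/get, like the class in the next answer):

#include <mutex>
#include <utility>

// Illustrative sketch: serialize every operation of a single-threaded LRU
// cache (assumed to expose put/get) behind one mutex that locks the whole
// structure, so no fine-grained locking is needed.
template <class Cache>
class CoarseLockedCache {
        Cache cache_;
        std::mutex mutex_;   // one lock guards the entire structure
public:
        template <class... Args>
        explicit CoarseLockedCache(Args&&... args)
                : cache_(std::forward<Args>(args)...) {}

        template <class K, class V>
        void put(const K& key, const V& value) {
                std::lock_guard<std::mutex> guard(mutex_);
                cache_.put(key, value);
        }

        template <class K>
        auto get(const K& key) {
                std::lock_guard<std::mutex> guard(mutex_);
                return cache_.get(key);
        }
};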

Answer 1 (Score: 23)

Here is my simple sample C++ implementation of an LRU cache, combining a hash (unordered_map) and a list. Items in the list hold the key so they can reach the map, and items in the map hold an iterator so they can reach the list.

#include <list>
#include <unordered_map>
#include <assert.h>

using namespace std;

template <class KEY_T, class VAL_T> class LRUCache{
private:
        list< pair<KEY_T,VAL_T> > item_list;
        unordered_map<KEY_T, decltype(item_list.begin()) > item_map;
        size_t cache_size;
private:
        void clean(void){
                while(item_map.size()>cache_size){
                        auto last_it = item_list.end(); last_it --;
                        item_map.erase(last_it->first);
                        item_list.pop_back();
                }
        };
public:
        LRUCache(int cache_size_):cache_size(cache_size_){
                ;
        };

        void put(const KEY_T &key, const VAL_T &val){
                auto it = item_map.find(key);
                if(it != item_map.end()){
                        item_list.erase(it->second);
                        item_map.erase(it);
                }
                item_list.push_front(make_pair(key,val));
                item_map.insert(make_pair(key, item_list.begin()));
                clean();
        };
        bool exist(const KEY_T &key){
                return (item_map.count(key)>0);
        };
        VAL_T get(const KEY_T &key){
                assert(exist(key));
                auto it = item_map.find(key);
                item_list.splice(item_list.begin(), item_list, it->second);
                return it->second->second;
        };

};
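
For illustration, a possible way to exercise the class above (the main function below is not part of the original answer):

#include <iostream>
#include <string>

int main(){
        LRUCache<std::string, int> cache(2);    // capacity of two entries
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");                         // "a" becomes the most recently used
        cache.put("c", 3);                      // evicts "b", the least recently used
        std::cout << cache.exist("b") << "\n";  // prints 0
        std::cout << cache.get("a") << "\n";    // prints 1
        std::cout << cache.get("c") << "\n";    // prints 3
        return 0;
}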

Answer 2 (Score: 3)

Here is my implementation of a basic, simple LRU cache.

//LRU Cache
#include <cassert>
#include <list>
#include <unordered_map>

template <typename K,
          typename V
          >
class LRUCache
    {
    // Key access history, most recent at back
    typedef std::list<K> List;

    // Key to value and key history iterator
    typedef std::unordered_map< K,
                           std::pair<
                                     V,
                                     typename std::list<K>::iterator
                                    >
                         > Cache;

    typedef V (*Fn)(const K&);

public:
    LRUCache( size_t aCapacity, Fn aFn ) 
        : mFn( aFn )
        , mCapacity( aCapacity )
        {}

    //get value for key aKey
    V operator()( const K& aKey )
        {
        typename Cache::iterator it = mCache.find( aKey );
        if( it == mCache.end() ) //cache-miss: did not find the key
            {
            V v = mFn( aKey );
            insert( aKey, v );
            return v;
            }

        // cache-hit
        // Update access record by moving accessed key to back of the list
        mList.splice( mList.end(), mList, (it)->second.second );

        // return the retrieved value
        return (it)->second.first;
        }

private:
        // insert a new key-value pair in the cache
    void insert( const K& aKey, V aValue )
        {
        //method should be called only when cache-miss happens
        assert( mCache.find( aKey ) == mCache.end() );

        // make space if necessary
        if( mList.size() == mCapacity )
            {
            evict();
            }

        // record k as most-recently-used key
        typename std::list<K>::iterator it = mList.insert( mList.end(), aKey );

        // create key-value entry, linked to the usage record
        mCache.insert( std::make_pair( aKey, std::make_pair( aValue, it ) ) );
        }

        //Purge the least-recently used element in the cache
    void evict()
        {
        assert( !mList.empty() );

        // identify least-recently-used key
        const typename Cache::iterator it = mCache.find( mList.front() );

        //erase both elements to completely purge record
        mCache.erase( it );
        mList.pop_front();
        }

private:
    List mList;
    Cache mCache;
    Fn mFn;
    size_t mCapacity;
    };
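
For illustration, a possible way to use the class above as a memoizing cache (squareString below is an invented loader, not part of the original answer):

#include <iostream>
#include <string>

// Invented example loader: computes the value for a key on a cache miss.
std::string squareString( const int& n ) { return std::to_string( n * n ); }

int main()
    {
    LRUCache<int, std::string> cache( 2, squareString );
    std::cout << cache( 3 ) << "\n"; // miss: the loader computes "9"
    std::cout << cache( 3 ) << "\n"; // hit: served from the cache
    std::cout << cache( 4 ) << "\n"; // miss: computes "16"
    std::cout << cache( 5 ) << "\n"; // miss: evicts key 3, the least recently used
    return 0;
    }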

Answer 3 (Score: 2)

I have seen several unnecessarily complicated implementations here, so I decided to provide mine as well. The cache has only two methods, get and set. Hopefully it is more readable and understandable:

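Since the snippet is not reproduced above, here is a stand-in sketch (not the author's code): a minimal two-method get/set LRU cache built on std::list + std::unordered_map.

#include <cstddef>
#include <list>
#include <optional>
#include <unordered_map>
#include <utility>

template <class K, class V>
class SimpleLRU {
        std::size_t capacity_;
        std::list< std::pair<K,V> > items_;   // front = most recently used
        std::unordered_map<K, typename std::list< std::pair<K,V> >::iterator> index_;
public:
        explicit SimpleLRU(std::size_t capacity) : capacity_(capacity) {}

        void set(const K& key, const V& value){
                auto it = index_.find(key);
                if(it != index_.end()){ items_.erase(it->second); index_.erase(it); }
                items_.emplace_front(key, value);
                index_[key] = items_.begin();
                if(items_.size() > capacity_){          // evict the least recently used
                        index_.erase(items_.back().first);
                        items_.pop_back();
                }
        }

        std::optional<V> get(const K& key){
                auto it = index_.find(key);
                if(it == index_.end()) return std::nullopt;
                items_.splice(items_.begin(), items_, it->second);   // move to front
                return it->second->second;
        }
};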

Answer 4 (Score: 1)

I have an LRU implementation here. The interface follows std::map, so it should not be hard to use. Additionally, you can provide a custom backup handler, which is used when data is invalidated in the cache.

sweet::Cache<std::string,std::vector<int>, 48> c1;
c1.insert("key1", std::vector<int>());
c1.insert("key2", std::vector<int>());
assert(c1.contains("key1"));

Answer 5 (Score: 1)

I implemented a thread-safe LRU cache two years ago.

LRU is typically implemented with a HashMap and a LinkedList. You can google the implementation details; there are a lot of resources about it (Wikipedia also has a good explanation).

To guarantee thread safety, you need to take a lock whenever the state of the LRU is modified.

I will paste my C++ code here for your reference.

Here is the implementation.

/***
    A template thread-safe LRU container.

    Typically LRU cache is implemented using a doubly linked list and a hash map.
    Doubly Linked List is used to store list of pages with most recently used page
    at the start of the list. So, as more pages are added to the list,
    least recently used pages are moved to the end of the list with page
    at tail being the least recently used page in the list.

    Additionally, this LRU provides time-to-live feature. Each entry has an expiration
    datetime.
***/
#ifndef LRU_CACHE_H
#define LRU_CACHE_H

#include <iostream>
#include <list>

#include <boost/unordered_map.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/thread/mutex.hpp>

template <typename KeyType, typename ValueType>
  class LRUCache {
 private:
  typedef boost::posix_time::ptime DateTime;

  // Cache-entry
  struct ListItem {
  ListItem(const KeyType &key,
           const ValueType &value,
           const DateTime &expiration_datetime)
  : m_key(key), m_value(value), m_expiration_datetime(expiration_datetime){}
    KeyType m_key;
    ValueType m_value;
    DateTime m_expiration_datetime;
  };

  typedef boost::shared_ptr<ListItem> ListItemPtr;
  typedef std::list<ListItemPtr> LruList;
  typedef typename std::list<ListItemPtr>::iterator LruListPos;
  typedef boost::unordered_map<KeyType, LruListPos> LruMapper;

  // A mutex to ensure thread-safety.
  boost::mutex m_cache_mutex;

  // Maximum number of entries.
  std::size_t m_capacity;

  // Stores cache-entries from latest to oldest.
  LruList m_list;

  // Mapper for key to list-position.
  LruMapper m_mapper;

  // Default time-to-live added to an entry every time we touch it.
  unsigned long m_ttl_in_seconds;

  /***
      Note: This is a helper function whose calls must be wrapped
      within a lock. It returns whether the key exists and has not
      expired, deleting the expired entry if necessary.
  ***/
  bool containsKeyHelper(const KeyType &key) {
    bool has_key(m_mapper.count(key) != 0);
    if (has_key) {
      LruListPos pos = m_mapper[key];
      ListItemPtr & cur_item_ptr = *pos;

      // Remove the entry if key expires
      if (isDateTimeExpired(cur_item_ptr->m_expiration_datetime)) {
        has_key = false;
        m_list.erase(pos);
        m_mapper.erase(key);
      }
    }
    return has_key;
  }

  /***
      Locate an item in the list by key and move it to the front of the list,
      which makes it the latest item.
      Note: This is a helper function whose calls must be wrapped
      within a lock.
  ***/
  void makeEntryTheLatest(const KeyType &key) {
    if (m_mapper.count(key)) {
      // Add original item at the front of the list,
      // and update <Key, ListPosition> mapper.
      LruListPos original_list_position = m_mapper[key];
      const ListItemPtr & cur_item_ptr = *original_list_position;
      m_list.push_front(cur_item_ptr);
      m_mapper[key] = m_list.begin();

      // Don't forget to update its expiration datetime.
      m_list.front()->m_expiration_datetime = getExpirationDatetime(m_list.front()->m_expiration_datetime);

      // Erase the item at original position.
      m_list.erase(original_list_position);
    }
  }

 public:

  /***
      Cache should have capacity to limit its memory usage.
      We also add time-to-live for each cache entry to expire
      the stale information. By default, ttl is one hour.
  ***/
 LRUCache(std::size_t capacity, unsigned long ttl_in_seconds = 3600)
   : m_capacity(capacity), m_ttl_in_seconds(ttl_in_seconds) {}

  /***
      Return now + time-to-live
  ***/
  DateTime getExpirationDatetime(const DateTime &now) {
    static const boost::posix_time::seconds ttl(m_ttl_in_seconds);
    return now + ttl;
  }

  /***
      If input datetime is older than current datetime,
      then it is expired.
  ***/
  bool isDateTimeExpired(const DateTime &date_time) {
    return date_time < boost::posix_time::second_clock::local_time();
  }

  /***
      Return the number of entries in this cache.
   ***/
  std::size_t size() {
    boost::mutex::scoped_lock lock(m_cache_mutex);
    return m_mapper.size();
  }

  /***
      Get value by key.
      Return true/false whether key exists.
      If key exists, input paramter value will get updated.
  ***/
  bool get(const KeyType &key, ValueType &value) {
    boost::mutex::scoped_lock lock(m_cache_mutex);
    if (!containsKeyHelper(key)) {
      return false;
    } else {
      // Make the entry the latest and update its TTL.
      makeEntryTheLatest(key);

      // Then get its value.
      value = m_list.front()->m_value;
      return true;
    }
  }

  /***
      Add <key, value> pair if no such key exists.
      Otherwise, just update the value of old key.
  ***/
  void put(const KeyType &key, const ValueType &value) {
    boost::mutex::scoped_lock lock(m_cache_mutex);
    if (containsKeyHelper(key)) {
      // Make the entry the latest and update its TTL.
      makeEntryTheLatest(key);

      // Now we only need to update its value.
      m_list.front()->m_value = value;
    } else { // Key does not exist or has expired.
      if (m_list.size() == m_capacity) {
        KeyType delete_key = m_list.back()->m_key;
        m_list.pop_back();
        m_mapper.erase(delete_key);
      }

      DateTime now = boost::posix_time::second_clock::local_time();
      m_list.push_front(boost::make_shared<ListItem>(key, value,
                                                     getExpirationDatetime(now)));
      m_mapper[key] = m_list.begin();
    }
  }
};
#endif

Here are the unit tests.

#include "cxx_unit.h"
#include "lru_cache.h"

struct LruCacheTest
  : public FDS::CxxUnit::TestFixture<LruCacheTest>{
  CXXUNIT_TEST_SUITE();
  CXXUNIT_TEST(LruCacheTest, testContainsKey);
  CXXUNIT_TEST(LruCacheTest, testGet);
  CXXUNIT_TEST(LruCacheTest, testPut);
  CXXUNIT_TEST_SUITE_END();

  void testContainsKey();
  void testGet();
  void testPut();
};


void LruCacheTest::testContainsKey() {
  LRUCache<int,std::string> cache(3);
  cache.put(1,"1"); // 1
  cache.put(2,"2"); // 2,1
  cache.put(3,"3"); // 3,2,1
  cache.put(4,"4"); // 4,3,2

  std::string value_holder("");
  CXXUNIT_ASSERT(cache.get(1, value_holder) == false); // 4,3,2
  CXXUNIT_ASSERT(value_holder == "");

  CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // 2,4,3
  CXXUNIT_ASSERT(value_holder == "2");

  cache.put(5,"5"); // 5, 2, 4

  CXXUNIT_ASSERT(cache.get(3, value_holder) == false); // 5, 2, 4
  CXXUNIT_ASSERT(value_holder == "2"); // value_holder is still "2"

  CXXUNIT_ASSERT(cache.get(4, value_holder) == true); // 4, 5, 2
  CXXUNIT_ASSERT(value_holder == "4");

  cache.put(2,"II"); // {2, "II"}, 4, 5

  CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // 2, 4, 5
  CXXUNIT_ASSERT(value_holder == "II");

  // Cache-entries : {2, "II"}, {4, "4"}, {5, "5"}
  CXXUNIT_ASSERT(cache.size() == 3);
  CXXUNIT_ASSERT(cache.get(2, value_holder) == true);
  CXXUNIT_ASSERT(cache.get(4, value_holder) == true);
  CXXUNIT_ASSERT(cache.get(5, value_holder) == true);
}

void LruCacheTest::testGet() {
  LRUCache<int,std::string> cache(3);
  cache.put(1,"1"); // 1
  cache.put(2,"2"); // 2,1
  cache.put(3,"3"); // 3,2,1
  cache.put(4,"4"); // 4,3,2

  std::string value_holder("");
  CXXUNIT_ASSERT(cache.get(1, value_holder) == false); // 4,3,2
  CXXUNIT_ASSERT(value_holder == "");

  CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // 2,4,3
  CXXUNIT_ASSERT(value_holder == "2");

  cache.put(5,"5"); // 5,2,4
  CXXUNIT_ASSERT(cache.get(5, value_holder) == true); // 5,2,4
  CXXUNIT_ASSERT(value_holder == "5");

  CXXUNIT_ASSERT(cache.get(4, value_holder) == true); // 4, 5, 2
  CXXUNIT_ASSERT(value_holder == "4");


  cache.put(2,"II");
  CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // {2 : "II"}, 4, 5
  CXXUNIT_ASSERT(value_holder == "II");

  // Cache-entries : {2, "II"}, {4, "4"}, {5, "5"}
  CXXUNIT_ASSERT(cache.size() == 3);
  CXXUNIT_ASSERT(cache.get(2, value_holder) == true);
  CXXUNIT_ASSERT(cache.get(4, value_holder) == true);
  CXXUNIT_ASSERT(cache.get(5, value_holder) == true);
}

void LruCacheTest::testPut() {
  LRUCache<int,std::string> cache(3);
  cache.put(1,"1"); // 1
  cache.put(2,"2"); // 2,1
  cache.put(3,"3"); // 3,2,1
  cache.put(4,"4"); // 4,3,2
  cache.put(5,"5"); // 5,4,3

  std::string value_holder("");
  CXXUNIT_ASSERT(cache.get(2, value_holder) == false); // 5,4,3
  CXXUNIT_ASSERT(value_holder == "");

  CXXUNIT_ASSERT(cache.get(4, value_holder) == true); // 4,5,3
  CXXUNIT_ASSERT(value_holder == "4");

  cache.put(2,"II");
  CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // II,4,5
  CXXUNIT_ASSERT(value_holder == "II");

  // Cache-entries : {2, "II"}, {4, "4"}, {5, "5"}
  CXXUNIT_ASSERT(cache.size() == 3);
  CXXUNIT_ASSERT(cache.get(2, value_holder) == true);
  CXXUNIT_ASSERT(cache.get(4, value_holder) == true);
  CXXUNIT_ASSERT(cache.get(5, value_holder) == true);
}

CXXUNIT_REGISTER_TEST(LruCacheTest);

Answer 6 (Score: 0)

Is a cache a data structure that supports retrieving a value by key, like a hash table? LRU means the cache has a certain size limitation, and we need to drop the least recently used entries periodically.

If you implement it with a linked list plus a hash table of pointers, how can you do O(1) retrieval of a value by key?

I would implement the LRU cache with a hash table in which the value of each entry is the value plus pointers to the previous/next entry.

Regarding multithreaded access, I would prefer a reader-writer lock (ideally implemented with a spin lock, since contention is usually brief) to a monitor.
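
A possible sketch of that layout (illustrative names only, not an implementation from the answer): one hash table whose entries carry the value plus prev/next pointers, with a std::shared_mutex standing in for the reader-writer lock. Note that get() still needs the exclusive lock because it reorders the recency list; the shared lock only helps for operations that leave the list untouched, such as contains().

#include <cstddef>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <unordered_map>

// std::unordered_map keeps pointers to its elements valid across rehashing,
// so storing raw Entry* links inside the table is safe.
template <class K, class V>
class PointerTableLRU {
  struct Entry {
    K key;                  // kept so the tail entry can be erased by key
    V value;
    Entry* prev = nullptr;  // more recently used neighbour
    Entry* next = nullptr;  // less recently used neighbour
  };

 public:
  explicit PointerTableLRU(std::size_t capacity) : capacity_(capacity) {}

  // Pure lookup that does not touch the recency list: a shared (reader)
  // lock is sufficient.
  bool contains(const K& key) const {
    std::shared_lock<std::shared_mutex> lock(mutex_);
    return table_.count(key) != 0;
  }

  // get() reorders the recency list, so it takes the exclusive lock.
  std::optional<V> get(const K& key) {
    std::unique_lock<std::shared_mutex> lock(mutex_);
    auto it = table_.find(key);
    if (it == table_.end()) return std::nullopt;
    moveToFront(&it->second);
    return it->second.value;
  }

  void put(const K& key, const V& value) {
    std::unique_lock<std::shared_mutex> lock(mutex_);
    auto it = table_.find(key);
    if (it != table_.end()) {
      it->second.value = value;
      moveToFront(&it->second);
      return;
    }
    if (table_.size() >= capacity_) evictTail();
    Entry& e = table_.emplace(key, Entry{key, value}).first->second;
    pushFront(&e);
  }

 private:
  void unlink(Entry* e) {
    if (e->prev) e->prev->next = e->next; else head_ = e->next;
    if (e->next) e->next->prev = e->prev; else tail_ = e->prev;
    e->prev = e->next = nullptr;
  }
  void pushFront(Entry* e) {
    e->next = head_;
    if (head_) head_->prev = e;
    head_ = e;
    if (!tail_) tail_ = e;
  }
  void moveToFront(Entry* e) {
    if (e == head_) return;
    unlink(e);
    pushFront(e);
  }
  void evictTail() {
    if (!tail_) return;
    K victim = tail_->key;   // copy the key before the map entry is destroyed
    unlink(tail_);
    table_.erase(victim);
  }

  std::unordered_map<K, Entry> table_;
  Entry* head_ = nullptr;              // most recently used
  Entry* tail_ = nullptr;              // least recently used
  std::size_t capacity_;
  mutable std::shared_mutex mutex_;    // the reader-writer lock
};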

Answer 7 (Score: 0)

The LRU page replacement technique:

When a page is referenced, the required page may already be in the cache.

If it is in the cache: we need to bring it to the front of the cache queue.

If it is NOT in the cache: we bring it into the cache. In simple terms, we add the new page at the front of the cache queue. If the cache is full, i.e. all the frames are full, we remove a page from the rear of the cache queue and add the new page at the front of the cache queue.

# Cache Size
csize = int(input())

# Sequence of pages 
pages = list(map(int,input().split()))

# Take a cache list
cache=[]

# Keep track of number of elements in cache
n=0

# Count Page Fault
fault=0

for page in pages:
    # If page exists in cache
    if page in cache:
        # Move the page to front as it is most recent page
        # First remove from cache and then append at front
        cache.remove(page)
        cache.append(page)
    else:
        # Cache is full
        if(n==csize):
            # Remove the least recent page 
            cache.pop(0)
        else:
            # Increment element count in cache
            n=n+1

        # Page does not exist in cache => page fault
        fault += 1
        cache.append(page)

print("Page Fault:",fault)

Input/Output

Input:
3
1 2 3 4 1 2 5 1 2 3 4 5

Output:
Page Fault: 10

Answer 8 (Score: 0)

Here is my simple Java program with O(1) complexity.

//

package com.chase.digital.mystack;

import java.util.HashMap;
import java.util.Map;

public class LRUCache {

  private int size;
  private Map<String, Map<String, Integer>> cache = new HashMap<>();

  public LRUCache(int size) {
    this.size = size;
  }

  public void addToCache(String key, String value) {
    if (cache.size() < size) {
      Map<String, Integer> valueMap = new HashMap<>();
      valueMap.put(value, 0);
      cache.put(key, valueMap);
    } else {
      findLRUAndAdd(key, value);
    }
  }


  public String getFromCache(String key) {
    String returnValue = null;
    if (cache.get(key) == null) {
      return null;
    } else {
      Map<String, Integer> value = cache.get(key);
      for (String s : value.keySet()) {
        value.put(s, value.get(s) + 1);
        returnValue = s;
      }
    }
    return returnValue;
  }

  private void findLRUAndAdd(String key, String value) {
    String leastRecentUsedKey = null;
    int lastUsedValue = 500000;
    for (String s : cache.keySet()) {
      final Map<String, Integer> stringIntegerMap = cache.get(s);
      for (String s1 : stringIntegerMap.keySet()) {
        final Integer integer = stringIntegerMap.get(s1);
        if (integer < lastUsedValue) {
          lastUsedValue = integer;
          leastRecentUsedKey = s;
        }
      }
    }
    cache.remove(leastRecentUsedKey);
    Map<String, Integer> valueMap = new HashMap<>();
    valueMap.put(value, 0);
    cache.put(key, valueMap);
  }


}

Answer 9 (Score: 0)

A detailed explanation is in my blogpost.

class LRUCache {
  constructor(capacity) {
    
        this.head = null;
        this.tail = null;
        this.capacity = capacity;
        this.count = 0;
    this.hashMap  = new Map();    
  }
 
  get(key) {
    var node = this.hashMap.get(key);
    if(node) {
      if(node == this.head) {
        // node is already at the head, just return the value
        return node.val;
      }      
      if(this.tail == node && this.tail.prev) {
        // if the node is at the tail,
        // set tail to the previous node if it exists.
        this.tail = this.tail.prev;
        this.tail.next = null;
      }
      // link neighbouring nodes together
      if(node.prev)
        node.prev.next = node.next;
      if(node.next)
        node.next.prev = node.prev;      
      // add the new head node
      node.prev = null;
      node.next = this.head;
      this.head.prev = node;
      this.head = node;
      return node.val;
    }
    return -1;
  }
  put(key, val) {
    this.count ++;
    var newNode = { key, val, prev: null, next: null };
    if(this.head == null) {
      // this.hashMap is empty creating new node
      this.head =  newNode;
      this.tail = newNode;
    }
    else {
      var oldNode = this.hashMap.get(key);
      if(oldNode) {
        // if node with the same key exists, 
        // clear prev and next pointers before deleting the node.
        if(oldNode.next) {
          if(oldNode.prev)
            oldNode.next.prev = oldNode.prev;
          else
            this.head = oldNode.next;
        }
        if(oldNode.prev) {          
          oldNode.prev.next = oldNode.next;
          if(oldNode == this.tail)
            this.tail = oldNode.prev;
        }
        // removing the node
        this.hashMap.delete(key);
        this.count --;        
      }
      // add the new node and set up the pointers to its neighbouring nodes
      var currentHead = this.head;
      currentHead.prev = newNode;        
      newNode.next = currentHead;
      this.head = newNode;
      if(this.tail == null)
        this.tail = currentHead;
      if(this.count == this.capacity + 1) {
        // remove the last node if over capacity
        var lastNode = this.tail;
        this.tail = lastNode.prev;
        if(!this.tail) {
          //debugger;
        }
        this.tail.next = null;
        this.hashMap.delete(lastNode.key);
        this.count --;
      }
    }
    this.hashMap.set(key, newNode);
    return null;
  }
}

var cache = new LRUCache(3);
cache.put(1,1); // 1
cache.put(2,2); // 2,1
cache.put(3,3); // 3,2,1

console.log( cache.get(2) ); // 2,3,1
console.log( cache.get(1) ); // 1,2,3
cache.put(4,4);              // 4,1,2 evicts 3
console.log( cache.get(3) ); // 3 is no longer in cache

Answer 10 (Score: 0)

How an LRU cache works

It discards the least recently used items first. This algorithm requires keeping track of what was used when, which is expensive if you want to make sure it always discards the least recently used item. General implementations of this technique require keeping "age bits" for the cache lines and tracking the least recently used line based on those age bits. In such an implementation, every time a cache line is used, the age of all the other cache lines changes.

The example below uses the access order A B C D E C D B.


class Node:
    def __init__(self, k, v):
        self.key = k
        self.value = v
        self.next = None
        self.prev = None

class LRU_cache:
    def __init__(self, capacity):
        self.capacity = capacity
        self.dic = dict()
        self.head = Node(0, 0)
        self.tail = Node(0, 0)
        self.head.next = self.tail
        self.tail.prev = self.head

    def _add(self, node):
        p = self.tail.prev
        p.next = node
        self.tail.prev = node
        node.next = self.tail
        node.prev = p

    def _remove(self, node):
        p = node.prev
        n = node.next
        p.next = n
        n.prev = p

    def get(self, key):
        if key in self.dic:
            n = self.dic[key]
            self._remove(n)
            self._add(n)
            return n.value
        return -1

    def set(self, key, value):
        n = Node(key, value)
        self._add(n)
        self.dic[key] = n
        if len(self.dic) > self.capacity:
            n = self.head.next
            self._remove(n)
            del self.dic[n.key]

cache = LRU_cache(3)
cache.set('a', 'apple')
cache.set('b', 'ball')
cache.set('c', 'cat')
cache.set('d', 'dog')
print(cache.get('a'))
print(cache.get('c'))
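
The class above uses the list plus dictionary layout. For comparison, the "age bit" bookkeeping described at the start of this answer can also be sketched with per-entry timestamps; the C++ below is illustrative only (eviction is an O(n) scan, unlike the list-based variants elsewhere on this page):

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_map>

class AgeStampCache {
    struct Slot { std::string value; std::uint64_t last_used; };
    std::unordered_map<std::string, Slot> slots_;
    std::uint64_t clock_ = 0;   // monotonically increasing "age" counter
    std::size_t capacity_;

public:
    explicit AgeStampCache(std::size_t capacity) : capacity_(capacity) {}

    void put(const std::string& key, const std::string& value) {
        auto it = slots_.find(key);
        if (it != slots_.end()) { it->second = {value, ++clock_}; return; }
        if (slots_.size() == capacity_) {
            auto victim = slots_.begin();           // scan for the oldest entry
            for (auto i = slots_.begin(); i != slots_.end(); ++i)
                if (i->second.last_used < victim->second.last_used) victim = i;
            slots_.erase(victim);
        }
        slots_[key] = Slot{value, ++clock_};
    }

    const std::string* get(const std::string& key) {
        auto it = slots_.find(key);
        if (it == slots_.end()) return nullptr;
        it->second.last_used = ++clock_;            // refresh the age on access
        return &it->second.value;
    }
};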

Answer 11 (Score: 0)

You can visit my LRU cache blog post: https://lrucachejava.blogspot.com/

Java code:

package datastructure;

import java.util.HashMap;

class Node2 {

int key;
int value;
Node2 pre;
Node2 next;

Node2(int key ,int value)
{
    this.key=key;
    this.value=value;
}

}

class LRUCache {

private HashMap<Integer,Node2> lrumap;
private int capacity;
private Node2 head,tail;

LRUCache(int capacity)
{
    this.capacity=capacity;
    lrumap=new HashMap<Integer,Node2>();
    head=null;
    tail=null;
    }

public void deleteNode(Node2 node)
{
    
    if(node==head)
    {
        head.next.pre=null;
        head=head.next;
        node=null;          
    }
    else if(node==tail)
    {
        tail.pre.next=null;
        tail=tail.pre;
        node=null;          
    }
    else
    {
        node.pre.next=node.next;
        node.next.pre=node.pre;
        node=null;
    }
}

public void addToHead(Node2 node)
{
    if(head==null && tail==null)
    {
        head=node;
        tail=node;
    }
    else
    {
    node.next=head;
    head.pre=node;
    head=node;
    }
    
}

public int get(int key)
{
    if(lrumap.containsKey(key))
    {
        Node2 gnode=lrumap.get(key);
        int result=gnode.value;
        deleteNode(gnode);
        addToHead(gnode);
        
        return result;
    }
    
    return -1;
}

public void set(int key,int value)
{
    if(lrumap.containsKey(key))
    {
        Node2 snode=lrumap.get(key);
        snode.value=value;
        deleteNode(snode);
        addToHead(snode);
    }
    else
    {
        Node2 node=new Node2(key,value);
        //System.out.println("mapsize="+lrumap.size()+"   capacity="+capacity);
        if(lrumap.size()>=capacity)
        {
        System.out.println("remove="+tail.key);
            lrumap.remove(tail.key);
            deleteNode(tail);
        
        }
        lrumap.put(key, node);
        addToHead(node);
        
    }
}

public void show()
{
    Node2 node = head;
    
    while(node.next!=null)
    {   
    System.out.print("["+node.key+","+node.value+"]--");
    node=node.next;
    }
    System.out.print("["+node.key+","+node.value+"]--");
    System.out.println();
}

}

public class LRUCacheDS {

public static void main(String[] args) {
    
    LRUCache lr= new LRUCache(4);
    lr.set(4,8);
    lr.set(2,28);
    lr.set(6,38);
    lr.show();
    lr.set(14,48);
    lr.show();
    lr.set(84,58);
    lr.show();
    lr.set(84,34);
    lr.show();
    lr.get(6);
    System.out.println("---------------------------------------------------------");
    lr.show();
    
}

}