Port mru_cache from the Chromium project #110

Open
wants to merge 2 commits into master
7 changes: 6 additions & 1 deletion container/BUILD
@@ -41,7 +41,6 @@ cc_test(
]
)


cc_test(
name = 'skiplist_test',
srcs = 'skiplist_test.cpp',
@@ -51,3 +50,9 @@ cc_test(
'//toft/base:random',
]
)

cc_test(
name = 'mru_cache_test',
srcs = 'mru_cache_test.cpp',
deps = ['//thirdparty/glog:glog']
)
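
The new cc_test target references mru_cache_test.cpp, which is not included in this excerpt of the diff. A minimal sketch of what such a test might look like, assuming glog CHECK macros (the only declared dependency) and a toft/container/ include path; the actual test file may well differ:

// Hypothetical sketch only: mru_cache_test.cpp itself is not shown in this
// diff. Uses glog CHECK macros, matching the declared glog dependency.
#include <string>

#include "toft/container/mru_cache.h"

#include "thirdparty/glog/logging.h"

int main(int argc, char** argv) {
    google::InitGoogleLogging(argv[0]);

    toft::MruCache<int, std::string> cache(2);   // auto-evicts beyond 2 entries
    cache.Put(1, "one");
    cache.Put(2, "two");
    CHECK(cache.Get(1) != cache.end());          // touches key 1, now most recent
    cache.Put(3, "three");                       // evicts key 2, the least recent
    CHECK(cache.Peek(2) == cache.end());
    CHECK_EQ(cache.size(), 2u);

    return 0;
}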
316 changes: 316 additions & 0 deletions container/mru_cache.h
@@ -0,0 +1,316 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains a template for a Most Recently Used cache that allows
// constant-time access to items using a key, but easy identification of the
// least-recently-used items for removal. Each key can only be associated with
// one payload item at a time.
//
// The key object will be stored twice, so it should support efficient copying.
//
// NOTE: While all operations are O(1), this code is written for
// legibility rather than optimality. If future profiling identifies this as
// a bottleneck, there is room for smaller values of 1 in the O(1). :]

#ifndef TOFT_CONTAINER_MRU_CACHE_H
#define TOFT_CONTAINER_MRU_CACHE_H

#include <list>
#include <map>
#include <utility>

#include "toft/base/uncopyable.h"
#include "toft/base/unordered_map.h"

#include "thirdparty/glog/logging.h"

namespace toft {

// MruCacheBase ----------------------------------------------------------------

// This template is used to standardize map type containers that can be used
// by MruCacheBase. This level of indirection is necessary because of the way
// that template template params and default template params interact.
template<class KeyType, class ValueType>
struct MruCacheStandardMap {
typedef std::map<KeyType, ValueType> Type;
};

// Base class for the Mru cache specializations defined below.
// The deletor will get called on all payloads that are being removed or
// replaced.
template<class KeyType, class PayloadType, class DeletorType,
template<typename, typename > class MapType = MruCacheStandardMap>
class MruCacheBase {
TOFT_DECLARE_UNCOPYABLE(MruCacheBase);

public:
// The payload of the list. This maintains a copy of the key so we can
// efficiently delete things given an element of the list.
typedef std::pair<KeyType, PayloadType> value_type;

private:
typedef std::list<value_type> PayloadList;
typedef typename MapType<KeyType, typename PayloadList::iterator>::Type KeyIndex;

public:
typedef typename PayloadList::size_type size_type;

typedef typename PayloadList::iterator iterator;
typedef typename PayloadList::const_iterator const_iterator;
typedef typename PayloadList::reverse_iterator reverse_iterator;
typedef typename PayloadList::const_reverse_iterator const_reverse_iterator;

enum {
NO_AUTO_EVICT = 0
};

// The max_size is the size at which the cache will prune its members to when
// a new item is inserted. If the caller wants to manage this itself (for
// example, maybe it has special work to do when something is evicted), it
// can pass NO_AUTO_EVICT to not restrict the cache size.
explicit MruCacheBase(size_type max_size)
: max_size_(max_size) {
}

MruCacheBase(size_type max_size, const DeletorType& deletor)
: max_size_(max_size),
deletor_(deletor) {
}

virtual ~MruCacheBase() {
iterator i = begin();
while (i != end())
i = Erase(i);
}

size_type max_size() const {
return max_size_;
}

// Inserts a payload item with the given key. If an existing item has
// the same key, it is removed prior to insertion. An iterator indicating the
// inserted item will be returned (this will always be the front of the list).
//
// The payload will be copied. In the case of an OwningMruCache, this function
// will take ownership of the pointer.
iterator Put(const KeyType& key, const PayloadType& payload) {
// Remove any existing payload with that key.
typename KeyIndex::iterator index_iter = index_.find(key);
if (index_iter != index_.end()) {
// Erase the reference to it. This will call the deletor on the removed
// element. The index reference will be replaced in the code below.
Erase(index_iter->second);
} else if (max_size_ != NO_AUTO_EVICT) {
// New item is being inserted which might make it larger than the maximum
// size: kick the oldest thing out if necessary.
ShrinkToSize(max_size_ - 1);
}

ordering_.push_front(value_type(key, payload));
index_.insert(std::make_pair(key, ordering_.begin()));
return ordering_.begin();
}

// Retrieves the contents of the given key, or end() if not found. This method
// has the side effect of moving the requested item to the front of the
// recency list.
//
// TODO(brettw) We may want a const version of this function in the future.
iterator Get(const KeyType& key) {
typename KeyIndex::iterator index_iter = index_.find(key);
if (index_iter == index_.end())
return end();
typename PayloadList::iterator iter = index_iter->second;

// Move the touched item to the front of the recency ordering.
ordering_.splice(ordering_.begin(), ordering_, iter);
return ordering_.begin();
}

// Retrieves the item associated with the given key, or end() if not found,
// without affecting the recency ordering (unlike Get).
//
// TODO(brettw) We may want a const version of this function in the future.
iterator Peek(const KeyType& key) {
typename KeyIndex::const_iterator index_iter = index_.find(key);
if (index_iter == index_.end())
return end();
return index_iter->second;
}

// Erases the item referenced by the given iterator. An iterator to the item
// following it will be returned. The iterator must be valid.
iterator Erase(iterator pos) {
deletor_(pos->second);
index_.erase(pos->first);
return ordering_.erase(pos);
}

// MruCache entries are often processed in reverse order, so we add this
// convenience function (not typically defined by STL containers).
reverse_iterator Erase(reverse_iterator pos) {
// We have to actually give it the incremented iterator to delete, since
// the forward iterator that base() returns is actually one past the item
// being iterated over.
return reverse_iterator(Erase((++pos).base()));
}

// Shrinks the cache so it only holds |new_size| items. If |new_size| is
// greater than or equal to the current number of items, this does nothing.
void ShrinkToSize(size_type new_size) {
for (size_type i = size(); i > new_size; i--)
Erase(rbegin());
}

// Deletes everything from the cache.
void Clear() {
for (typename PayloadList::iterator i(ordering_.begin()); i != ordering_.end(); ++i)
deletor_(i->second);
index_.clear();
ordering_.clear();
}

// Returns the number of elements in the cache.
size_type size() const {
// We don't use ordering_.size() for the return value because
// (as a linked list) it can be O(n).
DCHECK(index_.size() == ordering_.size());
return index_.size();
}

// Allows iteration over the list. Forward iteration starts with the most
// recent item and works backwards.
//
// Note that since these iterators are actually iterators over a list, you
// can keep them as you insert or delete things (as long as you don't delete
// the one you are pointing to) and they will still be valid.
iterator begin() {
return ordering_.begin();
}
const_iterator begin() const {
return ordering_.begin();
}
iterator end() {
return ordering_.end();
}
const_iterator end() const {
return ordering_.end();
}

reverse_iterator rbegin() {
return ordering_.rbegin();
}
const_reverse_iterator rbegin() const {
return ordering_.rbegin();
}
reverse_iterator rend() {
return ordering_.rend();
}
const_reverse_iterator rend() const {
return ordering_.rend();
}

bool empty() const {
return ordering_.empty();
}

private:
PayloadList ordering_;
KeyIndex index_;

size_type max_size_;

DeletorType deletor_;
};

// MruCache --------------------------------------------------------------------

// A functor that does nothing. Used by the MruCache.
template<class PayloadType>
class MruCacheNullDeletor {
public:
void operator()(PayloadType& payload) { // NOLINT(runtime/references)
}
};

// A container that does not do anything to free its data. Use this when storing
// value types (as opposed to pointers) in the list.
template<class KeyType, class PayloadType>
class MruCache : public MruCacheBase<KeyType, PayloadType, MruCacheNullDeletor<PayloadType> > {
TOFT_DECLARE_UNCOPYABLE(MruCache);

private:
typedef MruCacheBase<KeyType, PayloadType, MruCacheNullDeletor<PayloadType> > ParentType;

public:
// See MruCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit MruCache(typename ParentType::size_type max_size)
: ParentType(max_size) {
}
virtual ~MruCache() {
}
};

// OwningMruCache --------------------------------------------------------------

template<class PayloadType>
class MruCachePointerDeletor {
public:
void operator()(PayloadType& payload) { // NOLINT(runtime/references)
delete payload;
}
};

// A cache that owns the payload type, which must be a non-const pointer type.
// The pointers will be deleted when they are removed, replaced, or when the
// cache is destroyed.
template<class KeyType, class PayloadType>
class OwningMruCache :
public MruCacheBase<KeyType, PayloadType, MruCachePointerDeletor<PayloadType> > {
TOFT_DECLARE_UNCOPYABLE(OwningMruCache);

private:
typedef MruCacheBase<KeyType, PayloadType, MruCachePointerDeletor<PayloadType> > ParentType;

public:
// See MruCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit OwningMruCache(typename ParentType::size_type max_size)
: ParentType(max_size) {
}
virtual ~OwningMruCache() {
}
};

// HashingMruCache ------------------------------------------------------------

template<class KeyType, class ValueType>
struct MruCacheHashMap {
typedef std::unordered_map<KeyType, ValueType> Type;
};

// This class is similar to MruCache, except that it uses std::unordered_map as
// the map type instead of std::map. Note that your KeyType must be hashable
// to use this cache.
template<class KeyType, class PayloadType>
class HashingMruCache : public MruCacheBase<KeyType, PayloadType, MruCacheNullDeletor<PayloadType>,
MruCacheHashMap> {
TOFT_DECLARE_UNCOPYABLE(HashingMruCache);

private:
typedef MruCacheBase<KeyType, PayloadType,
MruCacheNullDeletor<PayloadType>, MruCacheHashMap> ParentType;

public:
// See MruCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit HashingMruCache(typename ParentType::size_type max_size)
: ParentType(max_size) {
}
virtual ~HashingMruCache() {
}
};

} // namespace toft

#endif // TOFT_CONTAINER_MRU_CACHE_H
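
For review context, a short usage sketch of the three cache flavors defined above. This is illustrative only and not part of the PR; the toft/container/mru_cache.h include path and the Decoded payload type are assumptions made for the example.

// Illustrative sketch, not part of the PR. The include path and the Decoded
// payload type are assumptions made for this example.
#include <string>

#include "toft/container/mru_cache.h"

struct Decoded {                         // hypothetical owned payload
    explicit Decoded(int n) : value(n) {}
    int value;
};

void Example() {
    // MruCache: stores value types; nothing is freed on eviction.
    toft::MruCache<std::string, int> sizes(10);
    sizes.Put("a.png", 1024);

    // OwningMruCache: stores non-const pointers and deletes them when an
    // entry is evicted, replaced, Clear()ed, or the cache is destroyed.
    toft::OwningMruCache<std::string, Decoded*> images(3);
    images.Put("a.png", new Decoded(1));
    images.Put("a.png", new Decoded(2));     // the first Decoded is deleted here

    // HashingMruCache: same interface, backed by std::unordered_map, so the
    // key type must be hashable.
    typedef toft::HashingMruCache<int, std::string> NameCache;
    NameCache names(NameCache::NO_AUTO_EVICT);
    names.Put(42, "answer");
    names.ShrinkToSize(0);                   // manual eviction under NO_AUTO_EVICT
}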