Since most cache implementations use the singleton pattern, a singleton base class is designed here as a C++ class template. Any class that inherits from it automatically supports the singleton pattern. The code is as follows:
[Cpp]
//
// SingltonT.h
//
// Template base class implementing the Singleton pattern: any class T that
// derives from Singlton<T> gains a lazily created, process-wide instance.
//
// NOTE(review): instance() performs an unsynchronized check-then-create, so
// it is NOT thread-safe on its own; in this article concurrent access is
// serialized at a higher level (see the rwlock used by DZCache).
#ifndef SingltonT_h
#define SingltonT_h

#include <iostream>
#include <memory>

template <typename T>
class Singlton {
public:
    // Returns the single shared instance, creating it on first use.
    static T *instance();

    ~Singlton() {
        std::cout << "destruct singlton" << std::endl;
    }

protected:
    // Protected: only the template and derived classes may construct.
    Singlton();

    // shared_ptr keeps the instance alive for the program's lifetime and
    // destroys it automatically at exit.
    static std::shared_ptr<T> s_instance;
};

template <typename T>
std::shared_ptr<T> Singlton<T>::s_instance;

template <typename T>
Singlton<T>::Singlton() {
    std::cout << "construct singlton" << std::endl;
}

template <typename T>
T *Singlton<T>::instance() {
    // Lazy creation: first caller allocates, later callers reuse.
    if (!s_instance.get())
        s_instance.reset(new T);
    return s_instance.get();
}

#endif  // SingltonT_h
In addition, concurrent access to the static singleton object from multiple threads must be synchronized, so a read/write spin lock is used to synchronize the set (write) operations on the cached data. It looks like this:
[Cpp]
#ifndef _RWLOCK_H_
#define _RWLOCK_H_

/*
 * Spin-based reader/writer lock built on the GCC __sync atomic builtins
 * instead of pthread_mutex_t.  Writers are exclusive; readers share.
 *
 * LOCK/UNLOCK are generic spin-lock helpers usable on any struct that has
 * an integer `lock` field.
 */
#define LOCK(q) while (__sync_lock_test_and_set(&(q)->lock, 1)) {}
#define UNLOCK(q) __sync_lock_release(&(q)->lock);

struct rwlock {
    int write;  /* nonzero while a writer holds (or is acquiring) the lock */
    int read;   /* count of readers currently holding the lock */
};

static inline void
rwlock_init(struct rwlock *lock) {
    lock->write = 0;
    lock->read = 0;
}

/*
 * Acquire for reading: loop until the read counter has been incremented
 * while no writer holds the lock.
 */
static inline void
rwlock_rlock(struct rwlock *lock) {
    for (;;) {  /* retry until the read counter is incremented successfully */
        while (lock->write) {
            __sync_synchronize();
        }
        __sync_add_and_fetch(&lock->read, 1);
        if (lock->write) {
            /* A writer slipped in between the check and the increment:
             * back the reader count out and retry. */
            __sync_sub_and_fetch(&lock->read, 1);
        } else {
            break;
        }
    }
}

/*
 * Acquire for writing.
 * FIX: the original issued a single unchecked __sync_lock_test_and_set, so
 * two concurrent writers could both believe they owned the lock; we must
 * spin until the flag is actually acquired (test_and_set returns 0).
 */
static inline void
rwlock_wlock(struct rwlock *lock) {
    while (__sync_lock_test_and_set(&lock->write, 1)) {}
    while (lock->read) {
        /* http://gcc.gnu.org/onlinedocs/gcc-4.6.2/gcc/Atomic-Builtins.html
         * The full barrier is essential: without it, g++ -O3 can hoist the
         * read of lock->read out of the loop and the program deadlocks. */
        __sync_synchronize();
    }
}

static inline void
rwlock_wunlock(struct rwlock *lock) {
    __sync_lock_release(&lock->write);
}

static inline void
rwlock_runlock(struct rwlock *lock) {
    __sync_sub_and_fetch(&lock->read, 1);
}

#endif  /* _RWLOCK_H_ */
Note that the lock here is not built on pthread_mutex_t but on the GCC __sync atomic builtins (e.g. __sync_fetch_and_add). Whether this is really 7-8 times faster than pthread_mutex_t, as the linked articles claim, I have not measured myself; interested readers are welcome to benchmark it.
With these two classes in place, I added the key-comparison method mentioned in the original article and introduced `id` so that Objective-C objects can be cached. The final modified code is as follows:
[Cpp]
#ifndef _MAP_LRU_CACHE_H_
#define _MAP_LRU_CACHE_H_

#include <string.h>
#include <stdio.h>
#include <stdlib.h>   // FIX: portable replacement for non-standard <sys/malloc.h>
#include <iostream>
#include <map>        // FIX: required for std::map, was missing
#include <memory>     // FIX: required for std::shared_ptr, was missing
#include <string>
#include "rwlock.h"

namespace lru_cache {

static const int DEF_CAPACITY = 100000;  // default number of cache records

// Monotonically increasing logical clock used as an LRU timestamp;
// a larger value means more recently accessed.
typedef unsigned long virtual_time;

typedef struct _HashKey {
    NSString *key;
} HashKey;

typedef struct _HashValue {
    id value_;             // the cached Objective-C object
    virtual_time access_;  // last-access time; keys into the LRU list
} HashValue;

// Generic ordering functor; specialized below for HashKey.
template <class key_t>
struct hashkey_compare {
    bool operator()(key_t x, key_t y) const {
        return x < y;
    }
};

// Order HashKeys by the UTF-8 contents of their NSString keys.
template <>
struct hashkey_compare<HashKey> {
    bool operator()(HashKey _x, HashKey _y) const {
        std::string x = [_x.key UTF8String];
        std::string y = [_y.key UTF8String];
        return x < y;
    }
};

// Customized map type with the HashKey comparator plugged in by default.
template <typename K, typename V, typename _Compare = hashkey_compare<K>,
          typename _Alloc = std::allocator<std::pair<const K, V> > >
class lru_map : public std::map<K, V, _Compare, _Alloc> {};

// LRU cache over two maps:
//   _hash_table : key -> (value, last-access time)
//   _lru_list   : last-access time -> key, so begin() is always the
//                 least recently used entry.
// Not internally synchronized; callers must lock (see DZCache).
class CLRUCache {
public:
    CLRUCache() : _now(0) {
        _lru_list = std::shared_ptr<lru_map<virtual_time, HashKey> >(
            new lru_map<virtual_time, HashKey>);
        _hash_table = std::shared_ptr<lru_map<HashKey, HashValue> >(
            new lru_map<HashKey, HashValue>);
    }

    ~CLRUCache() {
        _lru_list->clear();
        _hash_table->clear();
    }

    // Insert or refresh key -> value; when the table grows beyond
    // DEF_CAPACITY, evicts the least recently used entry.  Returns 0.
    int set(const HashKey &key, const id &value) {
        HashValue hash_value;
        hash_value.value_ = value;
        hash_value.access_ = get_virtual_time();
        std::pair<lru_map<HashKey, HashValue>::iterator, bool> ret =
            _hash_table->insert(std::make_pair(key, hash_value));
        if (!ret.second) {
            // Key already exists: retire its old LRU slot, then re-insert
            // at the new access time and overwrite the stored value.
            virtual_time old_access = (*_hash_table)[key].access_;
            lru_map<virtual_time, HashKey>::iterator iter =
                _lru_list->find(old_access);
            if (iter != _lru_list->end()) {
                _lru_list->erase(iter);
            }
            _lru_list->insert(std::make_pair(hash_value.access_, key));
            (*_hash_table)[key] = hash_value;
        } else {
            _lru_list->insert(std::make_pair(hash_value.access_, key));
            if (_hash_table->size() > (size_t)DEF_CAPACITY) {
                // Evict: begin() holds the smallest (oldest) access time.
                lru_map<virtual_time, HashKey>::iterator iter =
                    _lru_list->begin();
                _hash_table->erase(iter->second);
                _lru_list->erase(iter);
            }
        }
        return 0;
    }

    // Look up key; on a hit, refresh its access time and LRU position.
    // Returns a pointer into the table (valid until the entry is erased
    // or evicted), or NULL on a miss.
    HashValue *get(const HashKey &key) {
        lru_map<HashKey, HashValue>::iterator iter = _hash_table->find(key);
        if (iter != _hash_table->end()) {
            virtual_time old_access = iter->second.access_;
            iter->second.access_ = get_virtual_time();
            // Move the key to its new position in the LRU list.
            lru_map<virtual_time, HashKey>::iterator it =
                _lru_list->find(old_access);
            if (it != _lru_list->end()) {
                _lru_list->erase(it);
            }
            _lru_list->insert(std::make_pair(iter->second.access_, key));
            return &(iter->second);
        } else {
            return NULL;
        }
    }

    unsigned get_lru_list_size() { return (unsigned)_lru_list->size(); }
    unsigned get_hash_table_size() { return (unsigned)_hash_table->size(); }
    virtual_time get_now() { return _now; }

private:
    // Tick the logical clock; every set/get hit gets a fresh, unique time.
    virtual_time get_virtual_time() {
        return ++_now;
    }

    std::shared_ptr<lru_map<virtual_time, HashKey> > _lru_list;
    std::shared_ptr<lru_map<HashKey, HashValue> > _hash_table;
    virtual_time _now;
};

}  // namespace lru_cache

#endif  // _MAP_LRU_CACHE_H_
Next, let's look at how the singleton base class and the rwlock are combined to build the final cache class, as shown below:
[Cpp]
#ifndef _DZ_CACHE_H_
#define _DZ_CACHE_H_

using namespace lru_cache;

// Thread-aware Objective-C object cache: a singleton wrapping CLRUCache,
// with writes serialized through the rwlock.
// NOTE(review): get() takes no read lock in this design, so a concurrent
// set() can mutate the LRU list under a reader — confirm this is intended.
class DZCache : public Singlton<DZCache> {
    // The base template must reach the private constructor via `new T`.
    friend class Singlton<DZCache>;

private:
    std::shared_ptr<CLRUCache> clu_cache;
    rwlock *lock;

    DZCache() {
        // FIX: removed the useless private, non-static Instance() member
        // that shadowed the base class's static instance().
        lock = (rwlock *)malloc(sizeof(rwlock));
        rwlock_init(lock);
        clu_cache = std::shared_ptr<CLRUCache>(new CLRUCache());
        std::cout << "construct JobList" << std::endl;
    }

public:
    ~DZCache() {
        free(lock);
    }

    // Reference-style accessor over the pointer-style Singlton::instance().
    static DZCache &getInstance() {
        return *instance();
    }

    // Store value under key (exclusive write lock held for the update).
    void set(NSString *key, id value) {
        rwlock_wlock(lock);
        HashKey hash_key;
        hash_key.key = key;
        clu_cache->set(hash_key, value);
        rwlock_wunlock(lock);
    }

    // Fetch the value cached under key, or nil on a miss.
    id get(NSString *key) {
        HashKey hash_key;
        hash_key.key = key;
        HashValue *value = clu_cache->get(hash_key);
        if (value == NULL) {
            return nil;
        } else {
            return value->value_;
        }
    }
};

#endif  // _DZ_CACHE_H_
Finally, let's take a look at how to use it:
[Cpp]
// Usage demo for DZCache: exercises both the pointer-style instance()
// accessor and the reference-style getInstance() accessor.
void testLRUCache() {
    // Pointer style
    DZCache::instance()->set(@"name", @"daizhj");                      // set
    NSString *name = (NSString *)DZCache::instance()->get(@"name");    // get
    std::cout << [name UTF8String] << std::endl;

    NSNumber *age = [NSNumber numberWithInt:123123];
    DZCache::instance()->set(@"age", age);
    age = (NSNumber *)DZCache::instance()->get(@"age");

    // Reference style
    DZCache::getInstance().set(@"name", @"daizhenjun");
    name = (NSString *)DZCache::getInstance().get(@"name");
    std::cout << [name UTF8String] << std::endl;

    age = [NSNumber numberWithInt:123456];
    DZCache::getInstance().set(@"age", age);
    age = (NSNumber *)DZCache::getInstance().get(@"age");
}