C++ development of an LRUCache caching function in the iOS environment

Source: Internet
Author: User
This article focuses on how to develop a caching function that runs in the iOS environment using C++ in Xcode. The algorithm is based on LRU (least recently used). For LRU details, please see:
http://en.wikipedia.org/wiki/Page_replacement_algorithm#Least_recently_used
I previously saw a C++ implementation by another developer online that seemed good, so the core code follows his design.
The original author records the cached data and the LRU queue with two map objects. Note that the LRU queue is not implemented with a list in the usual way, but is instead modeled with a map.

There is also a combination of MRU and LRU, of course, if the design principle is clear, then it is easy to understand.
Given that most cache implementations use the singleton pattern, a Singlton base class is designed using C++ templates, so that a subclass supports the singleton pattern simply by inheriting from it. The code is as follows:
Copy Code code as follows:

//
SingltonT.h
//
#ifndef Singltont_h
#define Singltont_h
#include <iostream>
#include <tr1/memory>
using namespace Std;
using namespace Std::tr1;
Template <typename t>
Class Singlton {
Public
Static t* instance ();
void print () {
cout << "haha" << endl;
}
~singlton () {
cout << "Destruct Singlton" << Endl;
}
Protected
Singlton ();
Private
Protected
Static std::tr1::shared_ptr<t> s_instance;
Singlton ();
};
Template <typename t>
Std::tr1::shared_ptr<t> singlton<t>::s_instance;
Template <typename t>
Singlton<t>::singlton () {
cout << "construct Singlton" << Endl;
}
Template <typename t>
t* singlton<t>::instance () {
if (!s_instance.get ())
S_instance.reset (new T);
return S_instance.get ();
}

In addition, operating on the static single-instance object under multithreading raises a concurrent-access synchronization problem, so a read-write spin lock is used to synchronize set (writing data) operations. As follows:
Copy Code code as follows:

#ifndef _rwlock_h_
#define _rwlock_h_
#define LOCK (q) while (__sync_lock_test_and_set (& q)->lock,1) {}
#define UNLOCK (q) __sync_lock_release (& (Q)->lock);
struct Rwlock {
int write;
int read;
};
static inline void
Rwlock_init (struct Rwlock *lock) {
Lock->write = 0;
Lock->read = 0;
}
static inline void
Rwlock_rlock (struct Rwlock *lock) {
for (;;) {//Keep looping until the read counter is cumulative successfully
while (Lock->write) {
__sync_synchronize ();
}
__sync_add_and_fetch (&lock->read,1);
if (lock->write) {//When the lock is already written, remove the read lock register
__sync_sub_and_fetch (&lock->read,1);
} else {
Break
}
}
}
static inline void
Rwlock_wlock (struct Rwlock *lock) {
__sync_lock_test_and_set (&lock->write,1);
while (Lock->read) {
http://blog.itmem.com/?m=201204
Http://gcc.gnu.org/onlinedocs/gcc-4.6.2/gcc/Atomic-Builtins.html
__sync_synchronize ()//very important, if removed, g++-o3 optimized compiled generated program will produce deadlock
}
}
static inline void
Rwlock_wunlock (struct Rwlock *lock) {
__sync_lock_release (&lock->write);
}
static inline void
Rwlock_runlock (struct Rwlock *lock) {
__sync_sub_and_fetch (&lock->read,1);
}

The lock is designed around the __sync atomic builtins rather than pthread_mutex_t; as the author of the link above reports, this can perform 7-8 times better than pthread_mutex_t. I have not benchmarked it myself — interested readers are welcome to help test.
With these two classes in place, I added the key-comparison method mentioned by the original author and introduced `id` to support caching Objective-C objects. The final modified code is as follows:
Copy Code code as follows:

#ifndef _map_lru_cache_h_
#define _map_lru_cache_h_
#include <string.h>
#include <iostream>
#include "Rwlock.h"
#include <stdio.h>
#include <sys/malloc.h>
using namespace Std;
Namespace Lru_cache {
static const int def_capacity = 100000;//Default cache record Count
typedef unsigned long long virtual_time;
typedef struct _HASHKEY
{
nsstring* key;
}hashkey;
typedef struct _HASHVALUE
{
ID value_;
Virtual_time Access_;
}hashvalue;
For the HashKey comparator only
Template <class key_t>
struct hashkey_compare{
BOOL Operator () (key_t x, key_t y) const{
return x < y;
}
};
Template <>
struct hashkey_compare{
BOOL Operator () (HashKey __x, HashKey __y) const{
string x = [__x.key utf8string];
String y = [__y.key utf8string];
return x < y;
}
};
Custom map Types
Template <typename K, TypeName V, typename _compare = Hashkey_compare<k>
TypeName _alloc = std::allocator<std::p air<const K, v> > >
Class Lru_map:public map<k, V, _compare, _alloc>{};
Class Clrucache
{
Public
Clrucache (): _now (0) {
_lru_list = Shared_ptr<lru_map<virtual_time, hashkey> > (new Lru_map<virtual_time, HashKey>);
_hash_table = Shared_ptr<lru_map}
~clrucache () {
_lru_list->clear ();
_hash_table->clear ();
}
int set (const hashkey& key, const ID &value)
{
HashValue Hash_value;
Hash_value.value_ = value;
Hash_value.access_ = Get_virtual_time ();
pair< Mapif (!ret.second) {
Key already exist
Virtual_time old_access = (*_hash_table) [Key].access_;
Map<virtual_time, Hashkey>::iterator iter = _lru_list->find (old_access);
if (ITER!= _lru_list->end ())
{
_lru_list->erase (ITER);
}
_lru_list->insert (Make_pair (Hash_value.access_, key));
(*_hash_table) [Key] = Hash_value;
}
else {
_lru_list->insert (Make_pair (Hash_value.access_, key));
if (_hash_table->size () > Def_capacity)
{
Get the least recently used key
Map<virtual_time, Hashkey>::iterator iter = _lru_list->begin ();
_hash_table->erase (Iter->second);
Remove last key from list
_lru_list->erase (ITER);
}
}
return 0;
}
hashvalue* get (const hashkey& key)
{
Mapif (ITER!= _hash_table->end ())
{
Virtual_time old_access = iter->second.access_;
Iter->second.access_ = Get_virtual_time ();
Adjusts the position of the current key in the LRU list
Map<virtual_time, Hashkey>::iterator it = _lru_list->find (old_access);
if (it!= _lru_list->end ()) {
_lru_list->erase (IT);
}
_lru_list->insert (Make_pair (Iter->second.access_, key));
Return & (Iter->second);
}
else{
return NULL;
}
}

Unsigned get_lru_list_size () {return (unsigned) _lru_list->size ();}
Unsigned get_hash_table_size () {return (unsigned) _hash_table->size ();}
Virtual_time Get_now () {return _now;}
Private
Virtual_time Get_virtual_time ()
{
return ++_now;
}
Shared_ptr<lru_map<virtual_time, hashkey> > _lru_list;
Shared_ptr<lru_mapVirtual_time _now;
};
#endif

Let's take a look at the final caching functionality, combining the singleton base class and the rwlock, as follows:
Copy Code code as follows:

using namespace Lru_cache;
Class Dzcache:public Singlton<dzcache>
{
Friend Class singlton<dzcache>;
Private
Shared_ptr<clrucache> Clu_cache;
Rwlock *lock;
Dzcache () {
Lock = (rwlock*) malloc (sizeof (Rwlock));
Rwlock_init (lock);
Clu_cache = shared_ptr<clrucache> (New Clrucache ());
cout << "construct joblist" << Endl;
}
Dzcache * Instance () {
return S_instance.get ();
}
Public
~dzcache () {
Free (lock);
}
Static dzcache& getinstance () {
return *instance ();
}
void set (nsstring* key, id value) {
Lock
Rwlock_wlock (lock);
HashKey Hash_key;
Hash_key.key = key;
Clu_cache->set (Hash_key, value);
Rwlock_wunlock (lock);
}
ID get (nsstring* key) {
HashKey Hash_key;
Hash_key.key = key;
hashvalue* value = Clu_cache->get (Hash_key);
if (value = = NULL) {
return nil;
}
else{
Return value->value_;
}
}
};
#endif

Finally, take a look at how to use:
Copy Code code as follows:

void Testlrucache () {
Pointer mode
Dzcache::instance ()->set (@ "name", @ "DAIZHJ");/Set
nsstring* name = (nsstring*) dzcache::instance ()->get (@ "name");//Get
Std::cout<<[name utf8string]<<endl;
NSNumber * Age=[nsnumber numberwithint:123123];
Dzcache::instance ()->set (@ ' age ', age);
Age = (nsnumber*) dzcache::instance ()->get (@ ' age ');
Object mode
Dzcache::getinstance (). Set (@ "name", @ "Daizhenjun");
Name = (nsstring*) dzcache::getinstance (). Get (@ "name");
Std::cout<<[name utf8string]<<endl;
Age = [NSNumber numberwithint:123456];
Dzcache::getinstance (). Set (@ ' age ', age);
Age = (nsnumber*) dzcache::getinstance (). Get (@ ' age ');
}

Well, today's content is here first.
Related Article

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.