/*******************************************************************************

        copyright:      Copyright (c) 2008 Kris Bell. All rights reserved

        license:        BSD style: $(LICENSE)

        version:        Initial release: April 2008

        author:         Kris

        Since:          0.99.7

*******************************************************************************/

module tango.util.container.more.CacheMap;

private import tango.stdc.stdlib;

private import tango.util.container.HashMap;

public  import tango.util.container.Container;

/******************************************************************************

        CacheMap extends the basic hashmap type by adding a limit to
        the number of items contained at any given time. In addition,
        CacheMap sorts the cache entries such that those entries
        frequently accessed are at the head of the queue, and those
        least frequently accessed are at the tail. When the queue
        becomes full, old entries are dropped from the tail and are
        reused to house new cache entries.

        In other words, it retains MRU items while dropping LRU when
        capacity is reached.

        This is great for keeping commonly accessed items around, while
        limiting the amount of memory used. Typically, the queue size
        would be set in the thousands (via the ctor)

******************************************************************************/

class CacheMap (K, V, alias Hash = Container.hash,
                      alias Reap = Container.reap,
                      alias Heap = Container.Collect)
{
        private alias QueuedEntry       Type;
        private alias Type              *Ref;
        private alias HashMap!(K, Ref, Hash, reaper, Heap) Map;

        // key -> linked-list-node lookup; the list itself tracks recency
        private Map                     hash;

        // pre-allocated pool of list nodes; never grows beyond capacity
        private Type[]                  links;

        // extents of queue: head == most-recently used, tail == least
        private Ref                     head,
                                        tail;
        // dimension of queue
        private uint                    capacity;

        /**********************************************************************

                Construct a cache with the specified maximum number of
                entries. Additions to the cache beyond this number will
                reuse the slot of the least-recently-referenced cache
                entry.

                The capacity must be non-zero: a zero-capacity cache
                could never hold an entry, and the node pool below
                would be empty.

        **********************************************************************/

        this (uint capacity)
        {
                assert (capacity, "CacheMap capacity must be non-zero");

                hash = new Map;
                this.capacity = capacity;
                hash.buckets (capacity, 0.75);
                links.length = capacity;

                // create empty list: thread every pooled node onto the
                // queue so addEntry() can always recycle the tail
                head = tail = &links[0];
                foreach (ref link; links[1..$])
                        {
                        link.prev = tail;
                        tail.next = &link;
                        tail = &link;
                        }
        }

        /***********************************************************************

                Reaping callback for the hashmap, acting as a trampoline:
                the hashmap hands us (key, node-ref) and we forward the
                node's payload value to the user-supplied Reap alias

        ***********************************************************************/

        static void reaper(K, R) (K k, R r)
        {
                Reap (k, r.value);
        }

        /***********************************************************************

                Return the number of entries currently in the cache

        ***********************************************************************/

        @property final const uint size ()
        {
                return hash.size;
        }

        /***********************************************************************

                Iterate from MRU to LRU entries

                Note that copies of the key and value are passed to the
                delegate, so mutation via the delegate does not affect
                the stored entries

        ***********************************************************************/

        final int opApply (scope int delegate(ref K key, ref V value) dg)
        {
                K key;
                V value;
                int result;

                // only the first hash.size nodes of the queue hold live
                // entries; the remainder are unused pool slots
                auto node = head;
                auto i = hash.size;
                while (i--)
                      {
                      key = node.key;
                      value = node.value;
                      if ((result = dg(key, value)) != 0)
                           break;
                      node = node.next;
                      }
                return result;
        }

        /**********************************************************************

                Get the cache entry identified by the given key.
                A successful lookup promotes the entry to MRU status.
                Returns false (leaving 'value' untouched) if the key
                is not present

        **********************************************************************/

        bool get (K key, ref V value)
        {
                Ref entry = null;

                // if we find 'key' then move it to the list head
                if (hash.get (key, entry))
                   {
                   value = entry.value;
                   reReference (entry);
                   return true;
                   }
                return false;
        }

        /**********************************************************************

                Place an entry into the cache and associate it with the
                provided key. Note that there can be only one entry for
                any particular key. If two entries are added with the
                same key, the second effectively overwrites the first.

                Returns true if we added a new entry; false if we just
                replaced an existing one

        **********************************************************************/

        final bool add (K key, V value)
        {
                Ref entry = null;

                // already in the list? -- replace entry
                if (hash.get (key, entry))
                   {
                   // set the new item for this key and move to list head
                   reReference (entry.set (key, value));
                   return false;
                   }

                // create a new entry at the list head
                addEntry (key, value);
                return true;
        }

        /**********************************************************************

                Remove the cache entry associated with the provided key.
                Returns false if there is no such entry.

        **********************************************************************/

        final bool take (K key)
        {
                V value;

                return take (key, value);
        }

        /**********************************************************************

                Remove (and return) the cache entry associated with the
                provided key. Returns false if there is no such entry.

        **********************************************************************/

        final bool take (K key, ref V value)
        {
                Ref entry = null;
                if (hash.get (key, entry))
                   {
                   value = entry.value;

                   // don't actually kill the list entry -- just place
                   // it at the list 'tail' ready for subsequent reuse
                   deReference (entry);

                   // remove the entry from hash
                   hash.removeKey (key);
                   return true;
                   }
                return false;
        }

        /**********************************************************************

                Place a cache entry at the tail of the queue. This makes
                it the least-recently referenced.

        **********************************************************************/

        private Ref deReference (Ref entry)
        {
                if (entry !is tail)
                   {
                   // adjust head
                   if (entry is head)
                       head = entry.next;

                   // move to tail
                   entry.extract;
                   tail = entry.append (tail);
                   }
                return entry;
        }

        /**********************************************************************

                Move a cache entry to the head of the queue. This makes
                it the most-recently referenced.

        **********************************************************************/

        private Ref reReference (Ref entry)
        {
                if (entry !is head)
                   {
                   // adjust tail
                   if (entry is tail)
                       tail = entry.prev;

                   // move to head
                   entry.extract;
                   head = entry.prepend (head);
                   }
                return entry;
        }

        /**********************************************************************

                Add an entry into the queue. If the queue is full, the
                least-recently-referenced entry is reused for the new
                addition.

        **********************************************************************/

        private Ref addEntry (K key, V value)
        {
                assert (capacity);

                if (hash.size < capacity)
                    // spare pool node available: the tail slot is unused
                    hash.add (key, tail);
                else
                   {
                   // we're re-using a prior QueuedEntry, so reap and
                   // relocate the existing hash-table entry first
                   Reap (tail.key, tail.value);
                   if (! hash.replaceKey (tail.key, key))
                         throw new Exception ("key missing!");
                   }

                // place at head of list
                return reReference (tail.set (key, value));
        }

        /**********************************************************************

                A doubly-linked list entry, used as a wrapper for queued
                cache entries

        **********************************************************************/

        private struct QueuedEntry
        {
                private K               key;
                private Ref             prev,
                                        next;
                private V               value;

                /**************************************************************

                        Set this linked-list entry with the given arguments.
                        Only the payload is touched; prev/next are managed
                        by the owning CacheMap

                **************************************************************/

                Ref set (K key, V value)
                {
                        this.value = value;
                        this.key = key;
                        return &this;
                }

                /**************************************************************

                        Insert this entry into the linked-list just in
                        front of the given entry.

                **************************************************************/

                Ref prepend (Ref before)
                {
                        if (before)
                           {
                           prev = before.prev;

                           // patch 'prev' to point at me
                           if (prev)
                               prev.next = &this;

                           //patch 'before' to point at me
                           next = before;
                           before.prev = &this;
                           }
                        return &this;
                }

                /**************************************************************

                        Add this entry into the linked-list just after
                        the given entry.

                **************************************************************/

                Ref append (Ref after)
                {
                        if (after)
                           {
                           next = after.next;

                           // patch 'next' to point at me
                           if (next)
                               next.prev = &this;

                           //patch 'after' to point at me
                           prev = after;
                           after.next = &this;
                           }
                        return &this;
                }

                /**************************************************************

                        Remove this entry from the linked-list. The
                        previous and next entries are patched together
                        appropriately.

                **************************************************************/

                Ref extract ()
                {
                        // make 'prev' and 'next' entries see each other
                        if (prev)
                            prev.next = next;

                        if (next)
                            next.prev = prev;

                        // Murphy's law: null the dangling links so a
                        // stale traversal fails fast rather than quietly
                        next = prev = null;
                        return &this;
                }
        }
}


/*******************************************************************************

        Manual smoke-test and micro-benchmark; enabled via -debug=CacheMap

*******************************************************************************/

debug (CacheMap)
{
        import tango.io.Stdout;
        import tango.core.Memory;
        import tango.time.StopWatch;

        void main()
        {
                int v;
                auto map = new CacheMap!(char[], int)(2);
                map.add ("foo", 1);
                map.add ("bar", 2);
                map.add ("wumpus", 3);
                foreach (k, v; map)
                         Stdout.formatln ("{} {}", k, v);

                Stdout.newline;
                map.get ("bar", v);
                foreach (k, v; map)
                         Stdout.formatln ("{} {}", k, v);

                Stdout.newline;
                map.get ("bar", v);
                foreach (k, v; map)
                         Stdout.formatln ("{} {}", k, v);

                Stdout.newline;
                map.get ("foo", v);
                foreach (k, v; map)
                         Stdout.formatln ("{} {}", k, v);

                Stdout.newline;
                map.get ("wumpus", v);
                foreach (k, v; map)
                         Stdout.formatln ("{} {}", k, v);


                // setup for benchmark, with a cache of integers
                auto test = new CacheMap!(int, int, Container.hash, Container.reap, Container.Chunk) (1000);
                const count = 1_000_000;
                StopWatch w;

                // benchmark adding
                w.start;
                for (int i=count; i--;)
                     test.add (i, i);
                Stdout.formatln ("{} adds: {}/s", count, count/w.stop);

                // benchmark reading
                w.start;
                for (int i=count; i--;)
                     test.get (i, v);
                Stdout.formatln ("{} lookups: {}/s", count, count/w.stop);

                // benchmark iteration
                w.start;
                foreach (key, value; test) {}
                Stdout.formatln ("{} element iteration: {}/s", test.size, test.size/w.stop);

                test.hash.check;
        }
}