/*******************************************************************************

        copyright:      Copyright (c) 2008 Kris Bell. All rights reserved

        license:        BSD style: $(LICENSE)

        version:        Initial release: April 2008

        author:         Kris

        Since:          0.99.7

*******************************************************************************/

module tango.util.container.more.StackMap;

private import tango.stdc.stdlib;

private import tango.util.container.HashMap;

public import tango.util.container.Container;
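
/*******************************************************************************

        Usage sketch (not part of the original module): the function below
        illustrates the push/get/pop behaviour of the StackMap class declared
        further down. The debug identifier "StackMapUsage" and the function
        name are hypothetical; the sketch is compiled only on request.

*******************************************************************************/

debug (StackMapUsage)
{
        private void stackMapUsageSketch ()
        {
                int key, value;
                auto map = new StackMap!(int, int)(2);

                map.push (1, 10);               // new entry (push returns true)
                map.push (2, 20);               // new entry (push returns true)
                map.push (2, 22);               // same key: value replaced (returns false)

                map.get (2, value);             // value is now 22

                // a third distinct key would exceed the capacity of 2
                try {
                        map.push (3, 30);
                } catch (Exception) {
                        // expected: the map is full and 3 is a new key
                }

                map.popHead (key, value);       // most recent key: 2 (value 22)
                map.popTail (key, value);       // oldest key: 1 (value 10)
        }
}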

/******************************************************************************

        StackMap extends the basic hashmap type by adding a limit to
        the number of items contained at any given time. In addition,
        StackMap retains the order in which elements were added, and
        employs that during foreach() traversal. Additions to the map
        beyond the capacity will result in an exception being thrown.

        You can push and pop things just like a typical stack, and/or
        traverse the entries in the order they were added, though with
        the additional capability of retrieving and/or removing by key.

        Note also that a push/add operation will replace an existing
        entry with the same key, exposing a twist on the stack notion.

******************************************************************************/

class StackMap (K, V, alias Hash = Container.hash,
                      alias Reap = Container.reap,
                      alias Heap = Container.Collect)
{
        private alias QueuedEntry       Type;
        private alias Type              *Ref;
        private alias HashMap!(K, Ref, Hash, reaper, Heap) Map;
        private Map                     hash;
        private Type[]                  links;

        // extents of queue
        private Ref                     head,
                                        tail,
                                        start;
        // dimension of queue
        private size_t                  capacity;

        /**********************************************************************

                Construct a cache with the specified maximum number of
                entries. Additions to the cache beyond this number will
                throw an exception.

        **********************************************************************/

        this (size_t capacity)
        {
                hash = new Map;
                this.capacity = capacity;
                hash.buckets (capacity, 0.75);
                //links = (cast(Ref) calloc(capacity, Type.sizeof))[0..capacity];
                links.length = capacity;

                // create empty list: thread the pre-allocated entries
                // together from head to tail, ready for reuse
                head = tail = &links[0];
                foreach (ref link; links[1..$])
                {
                        link.prev = tail;
                        tail.next = &link;
                        tail = &link;
                }
        }

        /***********************************************************************

                clean up when done

        ***********************************************************************/

        ~this ()
        {
                //free (links.ptr);
        }

        /***********************************************************************

                Reaping callback for the hashmap, acting as a trampoline

        ***********************************************************************/

        static void reaper(K, R) (K k, R r)
        {
                Reap (k, r.value);
        }

        /***********************************************************************

                Return the number of entries currently contained

        ***********************************************************************/

        @property final const size_t size ()
        {
                return hash.size;
        }

        /***********************************************************************

                Remove all entries from the map

        ***********************************************************************/

        final void clear ()
        {
                hash.clear;
                start = null;
        }

        /**********************************************************************

                Place an entry into the cache and associate it with the
                provided key. Note that there can be only one entry for
                any particular key. If two entries are added with the
                same key, the second effectively overwrites the first.

                Returns true if we added a new entry; false if we just
                replaced an existing one. A replacement does not change
                the order of the keys, and thus does not change stack
                ordering.

        **********************************************************************/

        final bool push (K key, V value)
        {
                return add (key, value);
        }

        /**********************************************************************

                Remove and return the most recent addition to the stack

        **********************************************************************/

        bool popHead (ref K key, ref V value)
        {
                if (start)
                {
                        key = head.key;
                        return take (key, value);
                }
                return false;
        }

        /**********************************************************************

                Remove and return the oldest addition to the stack

        **********************************************************************/

        bool popTail (ref K key, ref V value)
        {
                if (start)
                {
                        key = start.key;
                        return take (key, value);
                }
                return false;
        }

        /***********************************************************************

                Iterate from the oldest to the most recent additions

        ***********************************************************************/

        final int opApply (scope int delegate(ref K key, ref V value) dg)
        {
                K key;
                V value;
                int result;

                auto node = start;
                while (node)
                {
                        key = node.key;
                        value = node.value;
                        if ((result = dg(key, value)) != 0)
                            break;
                        node = node.prev;
                }
                return result;
        }

        /**********************************************************************

                Place an entry into the cache and associate it with the
                provided key. Note that there can be only one entry for
                any particular key. If two entries are added with the
                same key, the second effectively overwrites the first.

                Returns true if we added a new entry; false if we just
                replaced an existing one. A replacement does not change
                the order of the keys, and thus does not change stack
                ordering.

        **********************************************************************/

        final bool add (K key, V value)
        {
                Ref entry = null;

                // already in the list? -- replace entry
                if (hash.get (key, entry))
                {
                        entry.set (key, value);
                        return false;
                }

                // create a new entry at the list head
                entry = addEntry (key, value);
                if (start is null)
                    start = entry;
                return true;
        }

        /**********************************************************************

                Get the cache entry identified by the given key

        **********************************************************************/

        bool get (K key, ref V value)
        {
                Ref entry = null;

                if (hash.get (key, entry))
                {
                        value = entry.value;
                        return true;
                }
                return false;
        }

        /**********************************************************************

                Remove (and return) the cache entry associated with the
                provided key. Returns false if there is no such entry.

        **********************************************************************/

        final bool take (K key, ref V value)
        {
                Ref entry = null;
                if (hash.get (key, entry))
                {
                        value = entry.value;

                        if (entry is start)
                            start = entry.prev;

                        // don't actually kill the list entry -- just place
                        // it at the list 'tail' ready for subsequent reuse
                        deReference (entry);

                        // remove the entry from hash
                        hash.removeKey (key);
                        return true;
                }
                return false;
        }

        /**********************************************************************

                Place a cache entry at the tail of the queue. This makes
                it the least-recently referenced.

        **********************************************************************/

        private Ref deReference (Ref entry)
        {
                if (entry !is tail)
                {
                        // adjust head
                        if (entry is head)
                            head = entry.next;

                        // move to tail
                        entry.extract;
                        tail = entry.append (tail);
                }
                return entry;
        }

        /**********************************************************************

                Move a cache entry to the head of the queue. This makes
                it the most-recently referenced.

        **********************************************************************/

        private Ref reReference (Ref entry)
        {
                if (entry !is head)
                {
                        // adjust tail
                        if (entry is tail)
                            tail = entry.prev;

                        // move to head
                        entry.extract;
                        head = entry.prepend (head);
                }
                return entry;
        }

        /**********************************************************************

                Add an entry into the queue. An exception is thrown if
                the queue is full

        **********************************************************************/

        private Ref addEntry (K key, V value)
        {
                if (hash.size < capacity)
                {
                        hash.add (key, tail);
                        return reReference (tail.set (key, value));
                }

                throw new Exception ("Full");
        }

        /**********************************************************************

                A doubly-linked list entry, used as a wrapper for queued
                cache entries.

        **********************************************************************/

        private struct QueuedEntry
        {
                private K               key;
                private Ref             prev,
                                        next;
                private V               value;

                /**************************************************************

                        Set this linked-list entry with the given arguments.

                **************************************************************/

                Ref set (K key, V value)
                {
                        this.value = value;
                        this.key = key;
                        return &this;
                }

                /**************************************************************

                        Insert this entry into the linked-list just in
                        front of the given entry.

                **************************************************************/

                Ref prepend (Ref before)
                {
                        if (before)
                        {
                                prev = before.prev;

                                // patch 'prev' to point at me
                                if (prev)
                                    prev.next = &this;

                                // patch 'before' to point at me
                                next = before;
                                before.prev = &this;
                        }
                        return &this;
                }

                /**************************************************************

                        Add this entry into the linked-list just after
                        the given entry.

                **************************************************************/

                Ref append (Ref after)
                {
                        if (after)
                        {
                                next = after.next;

                                // patch 'next' to point at me
                                if (next)
                                    next.prev = &this;

                                // patch 'after' to point at me
                                prev = after;
                                after.next = &this;
                        }
                        return &this;
                }

                /**************************************************************

                        Remove this entry from the linked-list. The
                        previous and next entries are patched together
                        appropriately.

                **************************************************************/

                Ref extract ()
                {
                        // make 'prev' and 'next' entries see each other
                        if (prev)
                            prev.next = next;

                        if (next)
                            next.prev = prev;

                        // Murphy's law
                        next = prev = null;
                        return &this;
                }
        }
}
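
/*******************************************************************************

        The helper below is a sketch only and not part of the original module;
        the name tryPush and the debug identifier StackMapGuard are hypothetical.
        It shows one way for a caller to avoid the exception thrown by
        addEntry(): a key that is already present is always accepted (it is
        replaced in place, even at capacity), while a brand new key is
        accepted only while room remains. It is generic over the map type.

*******************************************************************************/

debug (StackMapGuard)
{
        private bool tryPush (Map, K, V) (Map map, size_t capacity, K key, V value)
        {
                V existing;

                // accept when room remains, or when the key already exists;
                // 'capacity' must be the value the map was constructed with
                if (map.size < capacity || map.get (key, existing))
                {
                        map.push (key, value);
                        return true;
                }
                return false;
        }
}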

/*******************************************************************************

*******************************************************************************/

debug (StackMap)
{
        import tango.io.Stdout;
        import tango.core.Memory;
        import tango.time.StopWatch;

        void main()
        {
                int v;
                auto map = new StackMap!(char[], int)(3);
                map.add ("foo", 1);
                map.add ("bar", 2);
                map.add ("wumpus", 3);
                foreach (k, value; map)
                         Stdout.formatln ("{} {}", k, value);

                Stdout.newline;
                map.get ("bar", v);
                foreach (k, value; map)
                         Stdout.formatln ("{} {}", k, value);

                Stdout.newline;
                map.get ("bar", v);
                foreach (k, value; map)
                         Stdout.formatln ("{} {}", k, value);

                Stdout.newline;
                map.get ("foo", v);
                foreach (k, value; map)
                         Stdout.formatln ("{} {}", k, value);

                Stdout.newline;
                map.get ("wumpus", v);
                foreach (k, value; map)
                         Stdout.formatln ("{} {}", k, value);


                // setup for benchmark, with a cache of integers
                auto test = new StackMap!(int, int, Container.hash, Container.reap, Container.Chunk) (1000000);
                const count = 1_000_000;
                StopWatch w;

                // benchmark adding
                w.start;
                for (int i=count; i--;)
                     test.add (i, i);
                Stdout.formatln ("{} adds: {}/s", count, count/w.stop);

                // benchmark reading
                w.start;
                for (int i=count; i--;)
                     test.get (i, v);
                Stdout.formatln ("{} lookups: {}/s", count, count/w.stop);

                // benchmark iteration
                w.start;
                foreach (key, value; test) {}
                Stdout.formatln ("{} element iteration: {}/s", test.size, test.size/w.stop);
        }
}
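
/*******************************************************************************

        Build note (not part of the original module): the smoke test and
        benchmark above are only compiled when the "StackMap" debug identifier
        is enabled, for example via

                dmd -debug=StackMap tango/util/container/more/StackMap.d

        assuming Tango is on the import path and is linked in as usual. The
        StackMapUsage and StackMapGuard sketches earlier in this file are
        hypothetical additions and use their own identifiers in the same way.

*******************************************************************************/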