diff --git a/.gitattributes b/.gitattributes
index 8db85ffe81c4b55fca801a542dc41e30c017bbda..5f2a7caf91f19560955dbaa7f200bb6326ddd90f 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -34,6 +34,15 @@ testfont binary
 /lib/master.pike.in foreign_ident
 /lib/modules/ADT.pmod/Queue.pike foreign_ident
 /lib/modules/ADT.pmod/Table.pmod foreign_ident
+/lib/modules/Cache.pmod/Policy.pmod/Base.pike foreign_ident
+/lib/modules/Cache.pmod/Policy.pmod/Multiple.pike foreign_ident
+/lib/modules/Cache.pmod/Policy.pmod/Sized.pike foreign_ident
+/lib/modules/Cache.pmod/Policy.pmod/Timed.pike foreign_ident
+/lib/modules/Cache.pmod/Storage.pmod/Base.pike foreign_ident
+/lib/modules/Cache.pmod/Storage.pmod/Gdbm.pike foreign_ident
+/lib/modules/Cache.pmod/Storage.pmod/Memory.pike foreign_ident
+/lib/modules/Cache.pmod/Storage.pmod/Yabu.pike foreign_ident
+/lib/modules/Cache.pmod/cache.pike foreign_ident
 /lib/modules/Crypto/_rsa.pike foreign_ident
 /lib/modules/Crypto/des3.pike foreign_ident
 /lib/modules/Crypto/des3_cbc.pike foreign_ident
diff --git a/lib/modules/Cache.pmod/Data.pike b/lib/modules/Cache.pmod/Data.pike
new file mode 100644
index 0000000000000000000000000000000000000000..cb0bf6efbe4b0bd7b3df8662d1cea9115403a913
--- /dev/null
+++ b/lib/modules/Cache.pmod/Data.pike
@@ -0,0 +1,47 @@
+/*
+ * Base storage-object for the cache system
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ */
+
+
+int atime=0; //last-access time.
+int ctime=0; //creation-time
+int etime=0; //expiry-time (if supplied). 0 otherwise
+float cost=1.0; //relative preciousness scale
+
+void touch() { //used by the Storage Manager mainly.
+  atime=time(1);
+}
+
+//expire_time is relative and in seconds.
+void create(void|mixed value, void|int expire_time, void|float preciousness) {
+  atime=ctime=time(1);
+}
+
+int size() {} //A method in order to allow for lazy computation.
+              //Used by some Policy Managers
+
+mixed data() {} //A method in order to allow for lazy computation
+
+
+#define DEFAULT_SIZE 2048
+
+//attempts a wild guess of an object's size.
+//It's left here as a common utility. Some classes won't even need it.
+int recursive_low_size(mixed whatfor) {
+  if (stringp(whatfor)) return sizeof(whatfor);
+  if (intp(whatfor)) return 4; //BUG on non 32-bit architectures. 
+  if (programp(whatfor) || objectp(whatfor) || 
+      functionp(whatfor)) return DEFAULT_SIZE;
+  // only composite types ahead
+  array(mixed) iter;
+  int size=sizeof(whatfor);
+  if (arrayp(whatfor)) iter=whatfor;
+  if (mappingp(whatfor)) iter=indices(whatfor)+values(whatfor);
+  if (multisetp(whatfor)) iter=indices(whatfor);
+  foreach(iter,mixed tmp) {
+    size+=recursive_low_size(tmp);
+  }
+  return size;
+}
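+
+//A rough worked example of what recursive_low_size() would report, for
+//reference only (the integer figure assumes the 32-bit guess above):
+//  recursive_low_size("hello")        // => 5
+//  recursive_low_size(({1,"ab"}))     // => 2 elements + 4 + 2 = 8
+//  recursive_low_size((["key":17]))   // => 1 entry + 3 + 4 = 8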
diff --git a/lib/modules/Cache.pmod/Policy.pmod/Base.pike b/lib/modules/Cache.pmod/Policy.pmod/Base.pike
new file mode 100644
index 0000000000000000000000000000000000000000..93f17d2866fb108cdf9aa0ccf1b019104d8e4c5b
--- /dev/null
+++ b/lib/modules/Cache.pmod/Policy.pmod/Base.pike
@@ -0,0 +1,14 @@
+/*
+ * Base class for the cache expiration-policy managers.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * All policy managers MUST implement this method.
+ *
+ * $Id: Base.pike,v 1.1 2000/07/02 20:14:39 kinkie Exp $
+ */
+
+void expire(Cache.Storage storage) {
+  throw( ({"Override this!\n", backtrace()}) );
+}
+
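+//A minimal sketch of what a concrete policy could look like: a hypothetical
+//"drop everything" policy (not part of this patch), using only the
+//Cache.Storage iteration API declared elsewhere in this changeset.
+//
+//  inherit Cache.Policy.Base;
+//
+//  void expire(Cache.Storage storage) {
+//    string key=storage->first();
+//    while (key) {
+//      storage->delete(key);
+//      key=storage->next();
+//    }
+//  }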
diff --git a/lib/modules/Cache.pmod/Policy.pmod/Multiple.pike b/lib/modules/Cache.pmod/Policy.pmod/Multiple.pike
new file mode 100644
index 0000000000000000000000000000000000000000..ec3109f10178a5b2d94a35a29caaf3bcf70b4c01
--- /dev/null
+++ b/lib/modules/Cache.pmod/Policy.pmod/Multiple.pike
@@ -0,0 +1,20 @@
+/*
+ * A multiple-policies expiration policy manager.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Multiple.pike,v 1.1 2000/07/02 20:14:47 kinkie Exp $
+ */
+
+inherit Cache.Policy.Base;
+private array(Cache.Policy.Base) my_policies;
+
+void expire (Cache.Storage storage) {
+  foreach(my_policies, object policy) {
+    policy->expire(storage);
+  }
+}
+
+void create(Cache.Policy.Base ... policies) {
+  my_policies=policies;
+}
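+
+//Usage sketch (illustrative only): combine a time-based and a size-based
+//policy so entries expire either by age or when the cache grows too large.
+//
+//  Cache.Policy.Base policy =
+//    Cache.Policy.Multiple(Cache.Policy.Timed(600),
+//                          Cache.Policy.Sized(1024*1024));
+//  policy->expire(some_storage); //some_storage is any storage manager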
diff --git a/lib/modules/Cache.pmod/Policy.pmod/Sized.pike b/lib/modules/Cache.pmod/Policy.pmod/Sized.pike
new file mode 100644
index 0000000000000000000000000000000000000000..3e294930aeba193d90af6816ecd9f2c5fd572afb
--- /dev/null
+++ b/lib/modules/Cache.pmod/Policy.pmod/Sized.pike
@@ -0,0 +1,58 @@
+/*
+ * An LRU, size-constrained expiration policy manager.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Sized.pike,v 1.1 2000/07/02 20:14:56 kinkie Exp $
+ */
+
+inherit Cache.Policy.Base;
+//watermarks
+int max_size=0; //in bytes
+int min_size=0;
+
+//used for the "candidate-for-removal" array
+#define KEY 0
+#define SIZE 1
+
+void expire (Cache.Storage storage) {
+  ADT.Priority_queue removables=ADT.Priority_queue();
+  Cache.Data got;
+  mixed tmp;
+  int now=time(1);
+  int current_size=0; //in bytes. Should I use kb maybe?
+
+  werror("expiring cache\n");
+  string key=storage->first();
+  while (key) {
+    got=storage->get(key,1);
+    werror("examining: %s (age: %d, size: %d). Current size is %d\n",
+           key,now-(got->atime), got->size(), current_size);
+    if ((tmp=got->etime) && tmp < now) { //explicit expiration
+      werror("expired\n");
+      storage->delete(key);
+      key=storage->next();
+      continue;
+    }
+    current_size+=got->size();
+    removables->push( got->atime, ({ key, got->size() })  );
+    if (current_size > max_size) {
+      array candidate;
+      while (current_size > min_size) {
+        candidate=removables->pop();
+        werror("deleting %s (size: %d)\n",candidate[KEY],candidate[SIZE]);
+        storage->delete(candidate[KEY]);
+        current_size-=candidate[SIZE];
+      }
+    }
+    key=storage->next();
+  }
+}
+
+void create (int max, void|int min) {
+  max_size=max;
+  if (min)
+    min_size=min;
+  else if (zero_type(min)) //not specified
+    min_size=max_size/2;
+}
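+
+//Usage sketch (illustrative only): keep the cache below roughly one
+//megabyte; when it overflows, trim down to the low watermark.
+//
+//  Cache.Policy.Base policy = Cache.Policy.Sized(1024*1024);          //min defaults to max/2
+//  Cache.Policy.Base strict = Cache.Policy.Sized(1024*1024, 262144);  //explicit low watermark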
diff --git a/lib/modules/Cache.pmod/Policy.pmod/Timed.pike b/lib/modules/Cache.pmod/Policy.pmod/Timed.pike
new file mode 100644
index 0000000000000000000000000000000000000000..8e1d236e4b1a88aea30ac8a226e1146400124119
--- /dev/null
+++ b/lib/modules/Cache.pmod/Policy.pmod/Timed.pike
@@ -0,0 +1,37 @@
+/*
+ * An access-time-based expiration policy manager.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Timed.pike,v 1.1 2000/07/02 20:15:10 kinkie Exp $
+ */
+
+//TODO: use the preciousness somehow.
+// idea: expire if (now-atime)*cost < ktime
+
+#define DEFAULT_KTIME 300
+private int ktime;
+
+inherit Cache.Policy.Base;
+
+void expire(Cache.Storage storage) {
+  werror("Expiring cache\n");
+  int now=time(1);
+  int limit=now-ktime;
+  string key=storage->first();
+  while (key) {
+    Cache.Data got=storage->get(key,1);
+    werror(sprintf("examining '%s' (age: %d, e: %d, v: %f)\n",
+                   key, now-got->atime, got->etime, got->cost));
+    if (got->atime < limit ||
+        (got->etime && got->etime < now) ) {
+      werror("deleting\n");
+      storage->delete(key);
+    }
+    key=storage->next();
+  }
+}
+
+void create(void|int instance_ktime) {
+  ktime=(instance_ktime?instance_ktime:DEFAULT_KTIME);
+}
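+
+//Usage sketch (illustrative only): expire entries that have not been
+//accessed for ten minutes (the default ktime is DEFAULT_KTIME, 300 seconds).
+//
+//  Cache.Policy.Base policy = Cache.Policy.Timed(600);
+//  policy->expire(some_storage);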
diff --git a/lib/modules/Cache.pmod/Storage.pmod/Base.pike b/lib/modules/Cache.pmod/Storage.pmod/Base.pike
new file mode 100644
index 0000000000000000000000000000000000000000..0ad7a0bd8ca149b277cda8d9163cececc89da85b
--- /dev/null
+++ b/lib/modules/Cache.pmod/Storage.pmod/Base.pike
@@ -0,0 +1,48 @@
+/*
+ * Storage Manager prototype.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Base.pike,v 1.1 2000/07/02 20:15:24 kinkie Exp $
+ *
+ * All storage managers must provide these methods.
+ */
+
+#define T() throw( ({"override this", backtrace()}))
+
+int(0..0)|string first() {
+  T();
+}
+
+int(0..0)|string next() {
+  T();
+}
+
+/*
+ * Guess what these do?
+ * I leave the data-object creation here, so that the storage manager
+ * can choose whatever data-class it pleases
+ */
+Cache.Data|int(0..0) set(string key, mixed value,
+                         void|int max_life, void|float preciousness) {
+  T();
+}
+
+//fetches some data from the cache synchronously.
+//be careful, as with some storage managers it might block the calling
+//thread for some time.
+//if notouch is set, the access time is not updated (policy managers use this).
+int(0..0)|Cache.Data get(string key, void|int notouch) {
+  T();
+}
+
+//fetches some data from the cache asynchronously.
+//the callback will get as first argument the key, and as second
+//argument 0 (cache miss) or a Cache.Data object.
+void aget(string key,
+          function(string,int(0..0)|Cache.Data:void) callback) {
+  T();
+}
+
+Cache.Data|int(0..0) delete(string key, void|int(0..1) hard) {
+  T();
+}
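+
+//How a policy manager is expected to drive this API (sketch only; the
+//concrete policy managers in this changeset follow the same pattern):
+//
+//  string key=storage->first();
+//  while (key) {
+//    Cache.Data entry=storage->get(key,1);  //1: don't update the access time
+//    if (entry->etime && entry->etime < time(1))  //or any other criterion
+//      storage->delete(key);
+//    key=storage->next();
+//  }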
diff --git a/lib/modules/Cache.pmod/Storage.pmod/Gdbm.pike b/lib/modules/Cache.pmod/Storage.pmod/Gdbm.pike
new file mode 100644
index 0000000000000000000000000000000000000000..e7298607bea6b9d090081cac2dbe580cb696b80c
--- /dev/null
+++ b/lib/modules/Cache.pmod/Storage.pmod/Gdbm.pike
@@ -0,0 +1,180 @@
+/*
+ * A GDBM-based storage manager.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Gdbm.pike,v 1.1 2000/07/02 20:15:42 kinkie Exp $
+ *
+ * This storage manager provides the means to save data to disk, via GDBM.
+ * In this manager I'll add reference documentation as comments to
+ * interfaces. It will be organized later in a more comprehensive format
+ *
+ * Settings will be added later.
+ */
+
+//after this many deletion ops, the databases will be compacted.
+#define CLUTTERED 100
+
+
+Gdbm.gdbm db, metadb;
+int deletion_ops=0; //after CLUTTERED deletion ops, we'll reorganize.
+
+class Data {
+  inherit Cache.Data;
+  //metadata is kept around, data loaded on demand.
+
+  int _size=0;
+  string _key=0;
+  mixed _data=0;
+  private Gdbm.gdbm db, metadb;
+  
+  int size() {
+    if (_size) return _size;
+    _size=recursive_low_size(data());
+    return _size;
+  }
+  
+  mixed data() {
+    if (!_data) 
+      _data=decode_value(db->fetch(_key));
+    return _data;
+  }
+  
+  private inline string metadata_dump () {
+    return encode_value( (["size":_size,"atime":atime,
+                           "ctime":ctime,"etime":etime,"cost":cost]) );
+  }
+  
+  //dumps the metadata if necessary.
+  void sync() {
+    metadb->store(_key,metadata_dump());
+  }
+  //restores a dumped object
+  //basically a kind of second-stage constructor for objects retrieved
+  //from the db.
+  //the dumped value is passed for efficiency reasons, otherwise we
+  //might have to perform two lookups to successfully retrieve an object
+  Data undump( string dumped_value) {
+    mapping m=decode_value(dumped_value);
+    if (!m) throw ( ({"Can't decode dumped value",backtrace()}) );
+    _size=m->size;
+    atime=m->atime;
+    ctime=m->ctime;
+    etime=m->etime;
+    cost=m->cost;
+    return this_object();
+  }
+  
+  inline void touch() {
+    atime=time(1);
+    sync();
+  }
+  
+  //initializes a new object with a fresh value. It's used only
+  //for the first instantiation, after that undump is to be used.
+  //The value and the metadata are stored to the DB right away
+  //(see the db->store call below).
+  Data init(mixed value, void|int expires, void|float 
+            preciousness) {
+    atime=ctime=time(1);
+    if (expires) etime=expires;
+    if (preciousness) cost=preciousness;
+    sync();
+    db->store(_key,encode_value(value));
+    return this_object();
+  }
+  
+  void create(string key, Gdbm.gdbm data_db, Gdbm.gdbm metadata_db) {
+    _key=key;
+    db=data_db;
+    metadb=metadata_db;
+  }
+  
+}
+
+
+//we could maybe use some kind of progressive approach: keep a few
+//items in queue, then fetch more as we need them. This approach
+//can be expensive in terms of memory
+//Something I can't figure out a clean solution for: reorganizing
+//the database. It would be cool to do that when we know it to be
+//somewhat junky, but guessing that kind of information would be
+//quite hard, especially if we consider caches surviving the process
+//that created them
+//Maybe we can put some heuristics: since almost only the policy manager
+//uses first(), next() and delete(), we might count the deletion operations
+//and reorganize when we reach some kind of threshold.
+private ADT.Queue keys;
+int(0..0)|string first() {
+  string tmp;
+  keys=ADT.Queue();
+  tmp=metadb->firstkey();
+  while (tmp) {
+    keys->put(tmp);
+    tmp=metadb->nextkey(tmp);
+  }
+  return keys->get();
+}
+
+int(0..0)|string next() {
+  if (!keys) return 0;
+  return keys->get();
+}
+
+void set(string key, mixed value,
+         void|int expire_time, void|float preciousness) {
+  //should I refuse storing objects too?
+  if (programp(value)||functionp(value)) {
+    werror("can't store value\n"); //TODO: use crumbs
+    return;
+  }
+  Data(key,db,metadb)->init(value,expire_time,preciousness);
+}
+
+int(0..0)|Cache.Data get(string key,void|int notouch) {
+  mixed tmp=metadb->fetch(key);
+  if (tmp) {
+    tmp=(Data(key,db,metadb)->undump(tmp));
+    if (!notouch) {
+      tmp->touch();
+    }
+  }
+  return tmp;
+}
+
+//fetches some data from the cache asynchronously.
+//the callback will get as first argument the key, and as second
+//argument 0 (cache miss) or a Cache.Data object.
+void aget(string key,
+          function(string,int(0..0)|Cache.Data:void) callback) {
+  callback(key,get(key));
+}
+
+Cache.Data|int(0..0) delete(string key, void|int(0..1) hard) {
+  Data rv=(hard?0:get(key,1));
+  db->delete(key);
+  metadb->delete(key);
+  deletion_ops++;
+  if (deletion_ops > CLUTTERED) {
+    werror("Reorganizing database\n");
+    db->reorganize();
+    metadb->reorganize();
+    deletion_ops=0;
+  }
+  return rv;
+}
+
+//A GDBM storage-manager must be hooked to a GDBM Database.
+void create(string path) {
+  db=Gdbm.gdbm(path+".db","rwcf");
+  metadb=Gdbm.gdbm(path+"_meta.db","rwcf");
+}
+
+
+/**************** thoughts and miscellanea ******************/
+//maybe we should split the database into two databases, one for the data
+//and one for the metadata.
+
+//we should really use an in-memory cache for the objects. I delay that
+//for now, since we don't have a decent footprint-constrained policy
+//manager yet.
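+
+//Usage sketch (illustrative only; the path prefix is hypothetical, ".db"
+//and "_meta.db" get appended as in create() above):
+//
+//  object storage = Cache.Storage.Gdbm("/tmp/mycache");
+//  storage->set("answer", 42);
+//  Cache.Data entry = storage->get("answer");
+//  if (entry) write("%O\n", entry->data());  // => 42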
diff --git a/lib/modules/Cache.pmod/Storage.pmod/Memory.pike b/lib/modules/Cache.pmod/Storage.pmod/Memory.pike
new file mode 100644
index 0000000000000000000000000000000000000000..f9b7d7263d855b5de50f382f15bef9c4d2424109
--- /dev/null
+++ b/lib/modules/Cache.pmod/Storage.pmod/Memory.pike
@@ -0,0 +1,118 @@
+/*
+ * A RAM-based storage manager.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Memory.pike,v 1.1 2000/07/02 20:15:57 kinkie Exp $
+ *
+ * This storage manager provides the means to save data to memory.
+ * In this manager I'll add reference documentation as comments to
+ * interfaces. It will be organized later in a more comprehensive format
+ *
+ * Settings will be added later.
+ */
+
+class Data {
+
+  inherit Cache.Data;
+  
+  int _size=0;
+  mixed _data=0;
+  
+  void create(void|mixed value, void|int abs_expire_time, 
+              void|float preciousness) {
+    _data=value;
+    atime=ctime=time(1);
+    if (abs_expire_time) etime=abs_expire_time;
+    if (preciousness) cost=preciousness;
+  }
+  
+  int size() {
+    if (_size) return _size;
+    return (_size=recursive_low_size(_data));
+  }
+  
+  mixed data() {
+    return _data;
+  }
+  
+}
+
+inherit Cache.Storage.Base;
+
+private mapping(string:mixed) data=([]);
+
+
+/*
+ * First an iterator over the contents.
+ * Since accesses to the data are not serialized to increase efficiency
+ * there are a few guidelines that must be followed in order to maintain
+ * consistency.
+ *
+ * First off, only one entity may be using the iterator at any one time.
+ * That's not as bad as it seems, as the only entity needing to enumerate
+ * the entries in cache is the expiration policy manager, and there can
+ * be only one for each storage manager.
+ *
+ * The enumerator over the cache is initialized by a call to first().
+ * Subsequent calls to next() return following entries in the cache.
+ * While it is guaranteed that each and every entry in the cache
+ * will be iterated upon by the enumerator, their order is NOT guaranteed
+ * to remain consistent across calls.
+ */
+
+// these are used by the enumerator. While entries might be deleted while
+// enumerating, it won't bite us.
+private array(string) iter=0;
+private int current=0;
+
+int(0..0)|string first() {
+  iter=indices(data);
+  current=0;
+  return next();
+}
+
+int(0..0)|string next() {
+  if (iter && current < sizeof(iter))
+    return iter[current++];
+  iter=0;
+  return 0;
+}
+
+/*
+ * Guess what these do?
+ * I leave the data-object creation here, so that the storage manager
+ * can choose whatever data-class it pleases
+ */
+void set(string key, mixed value,
+         void|int absolute_expire, 
+         void|float preciousness) {
+  data[key]=Data(value,absolute_expire,preciousness);
+}
+
+// fetches some data from the cache. If notouch is set, don't touch the
+// data from the cache (meant to be used by the storage manager only)
+int(0..0)|Cache.Data get(string key, void|int notouch) {
+  mixed tmp;
+  tmp=data[key];
+  if (!notouch && tmp) tmp->touch();
+  return tmp;
+}
+
+void aget(string key, 
+          function(string,int(0..0)|Cache.Data:void) callback) {
+  mixed rv=get(key);
+  callback(key,rv);
+}
+
+Cache.Data|int(0..0) delete(string key, void|int(0..1) hard) {
+  object(Cache.Data) rv=data[key];
+  if (hard) {
+    destruct(rv->data());
+    m_delete(data,key);
+    return 0;
+  }
+  m_delete(data,key);
+  return rv;
+}
+
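+//Usage sketch (illustrative only): the simplest storage manager; everything
+//lives in the mapping above and disappears with the process.
+//
+//  object storage = Cache.Storage.Memory();
+//  storage->set("greeting", "hello");
+//  write("%s\n", storage->get("greeting")->data());  // => hello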
diff --git a/lib/modules/Cache.pmod/Storage.pmod/Yabu.pike b/lib/modules/Cache.pmod/Storage.pmod/Yabu.pike
new file mode 100644
index 0000000000000000000000000000000000000000..1403ee54ad63911e985ab62856144aab7203b41d
--- /dev/null
+++ b/lib/modules/Cache.pmod/Storage.pmod/Yabu.pike
@@ -0,0 +1,173 @@
+/*
+ * A Yabu-based storage manager.
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: Yabu.pike,v 1.1 2000/07/02 20:16:55 kinkie Exp $
+ *
+ * Settings will be added later.
+ */
+
+#define CLUTTERED 200
+
+Yabu.Table db, metadb;
+Yabu.db yabudb;
+
+int deletion_ops=0;
+
+class Data {
+  inherit Cache.Data;
+  //metadata is kept around, data loaded on demand.
+
+  int _size=0;
+  string _key=0;
+  mixed _data=0;
+  private Yabu.Table db, metadb;
+  
+  int size() {
+    if (_size) return _size;
+    _size=recursive_low_size(data());
+    return _size;
+  }
+  
+  mixed data() {
+    if (!_data) 
+      _data=db->get(_key);
+    return _data;
+  }
+  
+  private inline mapping metadata_dump () {
+    return (["size":_size,"atime":atime,
+             "ctime":ctime,"etime":etime,"cost":cost]);
+  }
+  
+  //dumps the metadata if necessary.
+  void sync() {
+    metadb->set(_key,metadata_dump());
+  }
+
+  //restores a dumped object
+  //basically a kind of second-stage constructor for objects retrieved
+  //from the db.
+  //the dumped value is passed for efficiency reasons, otherwise we
+  //might have to perform two lookups to successfully retrieve an object
+  Data undump(mapping dumped_value) {
+     mapping m=dumped_value;
+     _size=m->size;
+     atime=m->atime;
+     ctime=m->ctime;
+     etime=m->etime;
+     cost=m->cost;
+     return this_object();
+  }
+  
+  inline void touch() {
+    atime=time(1);
+    sync();
+  }
+  
+  //initializes a new object with a fresh value. It's used only
+  //for the first instantiation, after that undump is to be used.
+  //The value and the metadata are stored to the DB right away
+  //(see the db->set call below).
+  Data init(mixed value, void|int expires, void|float 
+            preciousness) {
+     atime=ctime=time(1);
+     if (expires) etime=expires;
+     if (preciousness) cost=preciousness;
+     sync();
+     db->set(_key,value);
+     return this_object();
+  }
+  
+  void create(string key, Yabu.Table data_db, Yabu.Table metadata_db) {
+    _key=key;
+     db=data_db;
+     metadb=metadata_db;
+  }
+  
+}
+
+
+//we could maybe use some kind of progressive approach: keep a few
+//items in queue, then fetch more as we need them. This approach
+//can be expensive in terms of memory
+//Something I can't figure out a clean solution for: reorganizing
+//the database. It would be cool to do that when we know it to be
+//somewhat junky, but guessing that kind of information would be
+//quite hard, especially if we consider caches surviving the process
+//that created them
+//Maybe we can put some heuristics: since almost only the policy manager
+//uses first(), next() and delete(), we might count the deletion operations
+//and reorganize when we reach some kind of threshold.
+private ADT.Queue keys;
+int(0..0)|string first() {
+  keys=ADT.Queue(@indices(metadb));
+  return keys->get();
+}
+
+int(0..0)|string next() {
+  if (!keys) return 0;
+  return keys->get();
+}
+
+void set(string key, mixed value,
+          void|int expire_time, void|float preciousness) {
+  //problem: we can't store objects, functions or programs.
+  //we check here for the actual type. BUT if some 'forbidden' type
+  //is in a composite type's element, we'll mangle it along the way.
+  //Checking for bad types in composite arguments would be very expensive
+  //so I'd favour just stating the problem in the documentation and let
+  //developers take care of this themselves.
+   if (programp(value)||functionp(value)||objectp(value)) {
+     werror("can't store value\n"); //TODO: use crumbs
+     return;
+   }
+   Data(key,db,metadb)->init(value,expire_time,preciousness);
+}
+
+int(0..0)|Cache.Data get(string key,void|int notouch) {
+  mixed tmp=metadb->get(key);
+  if (tmp) {
+    tmp=(Data(key,db,metadb))->undump(tmp);
+    if (!notouch) {
+      tmp->touch();
+    }
+  }
+  return tmp;
+}
+
+//fetches some data from the cache asynchronously.
+//the callback will get as first argument the key, and as second
+//argument 0 (cache miss) or a Cache.Data object.
+void aget(string key,
+          function(string,int(0..0)|Cache.Data:void) callback) {
+  callback(key,get(key));
+}
+
+Cache.Data|int(0..0) delete(string key, void|int(0..1) hard) {
+  Data rv=(hard?0:get(key,1));
+  db->delete(key);
+  metadb->delete(key);
+  deletion_ops++;
+  if (deletion_ops > CLUTTERED) {
+    yabudb->reorganize();
+    deletion_ops=0;
+  }
+  return rv;
+}
+
+void create(string path) {
+  yabudb=Yabu.db(path+".yabu","wcSQ"); //let's hope I got the mode right.
+  db=yabudb["data"];
+  metadb=yabudb["metadata"];
+}
+
+
+/**************** thoughts and miscellanea ******************/
+//maybe we should split the database into two databases, one for the data
+//and one for the metadata.
+
+//we should really use an in-memory cache for the objects. I delay that
+//for now, since we don't have a decent footprint-constrained policy
+//manager yet.
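+
+//Usage sketch (illustrative only; the path prefix is hypothetical and
+//".yabu" gets appended as in create() above):
+//
+//  object storage = Cache.Storage.Yabu("/tmp/mycache");
+//  storage->set("answer", ({1,2,3}));
+//  write("%O\n", storage->get("answer")->data());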
diff --git a/lib/modules/Cache.pmod/cache.pike b/lib/modules/Cache.pmod/cache.pike
new file mode 100644
index 0000000000000000000000000000000000000000..d0f8ff5e860505c3db08100fd97238f7d4b3147c
--- /dev/null
+++ b/lib/modules/Cache.pmod/cache.pike
@@ -0,0 +1,174 @@
+/*
+ * A generic cache front-end
+ * by Francesco Chemolli <kinkie@roxen.com>
+ * (C) 2000 Roxen IS
+ *
+ * $Id: cache.pike,v 1.1 2000/07/02 20:14:27 kinkie Exp $
+ *
+ * This module serves as a front-end to different kinds of caching systems.
+ * It uses two helper objects to actually store data, and to determine
+ * expiration policies. Mechanisms to allow for distributed caching systems
+ * will be added in time, or at least this is the plan.
+ */
+
+#if constant(thread_create)
+#define do_possibly_threaded_call thread_create
+#else
+#define do_possibly_threaded_call call_function
+#endif
+
+#define DEFAULT_CLEANUP_CYCLE 300
+
+
+private int cleanup_cycle=DEFAULT_CLEANUP_CYCLE;
+private object(Cache.Storage.Base) storage;
+private object(Cache.Policy.Base) policy;
+
+
+//. Looks in the cache for an element with the given key and, if available,
+//. returns it. Returns 0 if the element is not available
+mixed lookup(string key) {
+  object(Cache.Data) tmp=storage->get(key);
+  return (tmp?tmp->data():0);
+}
+
+//structure: "key" -> (< ({function,args,0|timeout_call_out_id}) ...>)
+private mapping (string:multiset(array)) pending_requests=([]);
+
+private void got_results(string key, int|Cache.Data value) {
+  mixed data=([])[0]; //undef
+  if (pending_requests[key]) {
+    if (value) {
+      data=value->data();
+    }
+    foreach(indices(pending_requests[key]),array cb) {
+      cb[0](key,data,@cb[1]);
+      if (cb[2]) remove_call_out(cb[2]);
+    }
+    m_delete(pending_requests,key);
+  }
+  //pending requests have timed out. Let's just ignore this result.
+}
+
+//hooray for aliasing. This implementation relies _heavily_ on it
+//for the "req" argument
+private void no_results(string key, array req, mixed call_out_id) {
+  pending_requests[key][req]=0; //remove the pending request
+  req[0](key,0,@req[1]);        //invoke the callback with no data
+}
+
+//. asynchronously look the cache up.
+//. The callback will be given as arguments the key, the value, and then
+//. any user-supplied arguments.
+//. If the timeout (in seconds) expires before any data could be retrieved,
+//. the callback is called anyways, with 0 as value.
+void alookup(string key,
+              function(string,mixed,mixed...:void) callback,
+              int|float timeout,
+              mixed ... args) {
+  array req = ({callback,args,0});
+  if (!pending_requests[key]) { //FIXME: add double-indirection
+    pending_requests[key]=(< req >);
+    storage->aget(key,got_results);
+  } else {
+    pending_requests[key][req]=1;
+    //no need to ask the storage manager, since a query is already pending
+  }
+  if (timeout)
+    req[2]=call_out(no_results,timeout,key,req); //aliasing, gotta love it
+}
+
+//. Sets some value in the cache. Notice that the actual set operation
+//. might not happen at all if the set data doesn't make sense. For
+//. instance, storing an object or a program in an SQL-based backend
+//. will not be done, and no error will be given about the operation not
+//. being performed.
+//. Notice that while max_life will most likely be respected (objects will
+//. be garbage-collected at pre-determined intervals anyways), the
+//. preciousness is to be seen as advisory only for the garbage collector.
+//. If some data was already stored under the same key, it gets replaced.
+//. Also notice that max_life is _RELATIVE_ and in seconds.
+void store(string key, mixed value, void|int max_life,
+            void|float preciousness) {
+  storage->set(key,value,
+               (max_life?time(1)+max_life:0),
+               preciousness);
+}
+
+//. Forcibly removes some key. If data was actually present under that key,
+//. it is returned. Otherwise 0 is returned.
+//. If the 'hard' parameter is supplied and true, deleted objects will also
+//. be destruct()-ed upon removal.
+mixed delete(string key, void|int(0..1) hard) {
+  object(Cache.Data) tmp=storage->delete(key,hard);
+  return (tmp?tmp->data():0);
+}
+
+
+object cleanup_thread=0;
+
+void start_cleanup_cycle() {
+  if (master()->asyncp()) { //we're asynchronous. Let's use call_outs
+    call_out(async_cleanup_cache,cleanup_cycle);
+    return;
+  }
+#if constant(thread_create)
+  cleanup_thread=thread_create(threaded_cleanup_cycle);
+#else
+  call_out(async_cleanup_cache,cleanup_cycle); //let's hope we'll get async
+                                               //sooner or later.
+#endif
+}
+
+void async_cleanup_cache() {
+  call_out(async_cleanup_cache,cleanup_cycle);
+  do_possibly_threaded_call(policy->expire,storage);
+}
+
+void threaded_cleanup_cycle() {
+  while (1) {
+    if (master()->asyncp()) {
+      call_out(async_cleanup_cache,0);
+      return;
+    }
+    sleep(cleanup_cycle);
+    policy->expire(storage);
+  }
+}
+
+//. Creates a new cache object. Required are a storage manager, and an
+//. expiration policy object.
+void create(Cache.Storage.Base storage_mgr,
+            Cache.Policy.Base policy_mgr,
+            void|int cleanup_cycle_delay) {
+  if (!storage_mgr || !policy_mgr)
+    throw ( ({ "I need a storage manager and a policy manager",
+               backtrace() }) );
+  storage=storage_mgr;
+  policy=policy_mgr;
+  if (cleanup_cycle_delay) cleanup_cycle=cleanup_cycle_delay;
+  start_cleanup_cycle();
+}
+
+
+/*
+ * Miscellaneous thoughts.
+ *
+ * Some kind of settings-system will be needed, at least for the policy
+ * manager. Maybe having a couple of pass-through functions here might help.
+ *
+ * Data-objects should really be created by the Storage Manager, which can
+ * then choose to use specialized forms (i.e. using some SQL tricks to
+ * perform lazy work).
+ *
+ * I chose to go with call_outs for the cleanup cycle, starting a new thread
+ * if possible when doing cleanup.
+ * I have mixed feelings about this choice. On one side, it is quite
+ * cheap and easily implemented. On the other side, it restricts us to
+ * async mode, and creating a new thread can be not-so-cheap.
+ *
+ * It would be nice to have some statistics collection. But for some kind of
+ * stats the storage manager has to be involved (if any kind of efficiency
+ * is desired). However if we wish to do so, either we extend the storage
+ * manager's API, or we're in trouble.
+ */
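+
+/*
+ * Putting it all together: a rough usage sketch, not a statement of the
+ * final API (assuming the module path makes this file available as
+ * Cache.cache).
+ *
+ *   object cache = Cache.cache(Cache.Storage.Memory(),
+ *                              Cache.Policy.Timed(600));
+ *
+ *   cache->store("answer", 42, 3600);        //keep for at most an hour
+ *   write("%O\n", cache->lookup("answer"));  // => 42
+ *
+ *   //asynchronous lookup: the callback gets the key and the value
+ *   //(0 on a miss), here with a 5-second timeout.
+ *   cache->alookup("answer",
+ *                  lambda(string key, mixed value) {
+ *                    write("%s -> %O\n", key, value);
+ *                  }, 5);
+ */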