diff --git a/src/acconfig.h b/src/acconfig.h
index f39cbc585d1a84449588c0fb32383aa8ee6e7c2e..b0cf3941080d64731fd57b9d85c0eef99c201ddf 100644
--- a/src/acconfig.h
+++ b/src/acconfig.h
@@ -1,5 +1,5 @@
 /*
- * $Id: acconfig.h,v 1.93 2001/08/15 14:42:10 marcus Exp $
+ * $Id: acconfig.h,v 1.94 2001/12/10 02:08:11 mast Exp $
  */
 #ifndef MACHINE_H
 #define MACHINE_H
@@ -45,6 +45,9 @@
 /* Define this to use the new keypair loop. */
 #undef PIKE_MAPPING_KEYPAIR_LOOP
 
+/* Define this to use the new multiset implementation. */
+#undef PIKE_NEW_MULTISETS
+
 /* The following USE_* are used by smartlink */
 /* Define this if your ld sets the run path with -rpath */
 #undef USE_RPATH
diff --git a/src/builtin.cmod b/src/builtin.cmod
index 918fe3a2ec5139a579061e46a37872da5ca29c07..24793a2e1fc8facfb2f4a8b67c9fe2e3c8ca79c5 100644
--- a/src/builtin.cmod
+++ b/src/builtin.cmod
@@ -1,5 +1,5 @@
 /* -*- c -*-
- * $Id: builtin.cmod,v 1.70 2001/11/08 23:34:27 nilsson Exp $
+ * $Id: builtin.cmod,v 1.71 2001/12/10 02:08:12 mast Exp $
  */
 
 #include "global.h"
@@ -595,8 +595,12 @@ PIKEFUN int get_weak_flag(mapping m)
 
 PIKEFUN int get_weak_flag(multiset m)
 {
+#ifdef PIKE_NEW_MULTISETS
+  RETURN multiset_get_flags(m) & MULTISET_WEAK;
+#else
   RETURN (m->ind->flags & (ARRAY_WEAK_FLAG|ARRAY_WEAK_SHRINK)) ?
 	PIKE_WEAK_INDICES : 0;
+#endif
 }
 
 PIKEFUN program __empty_program()
@@ -773,14 +777,28 @@ PIKEFUN mixed random(array a)
 
 PIKEFUN mixed random(multiset m)
 {
-  if(!m->ind->size)
+  if(multiset_is_empty (m))
     SIMPLE_BAD_ARG_ERROR("random", 1, "multiset with elements in it");
+#ifdef PIKE_NEW_MULTISETS
+  if (multiset_indval (m)) {
+    ptrdiff_t nodepos = multiset_get_nth (m, my_rand() % multiset_sizeof (m));
+    push_multiset_index (m, nodepos);
+    push_multiset_value (m, nodepos);
+    sub_msnode_ref (m);
+    f_aggregate (2);
+  }
+  else {
+    push_multiset_index (m, multiset_get_nth (m, my_rand() % multiset_sizeof (m)));
+    sub_msnode_ref (m);
+  }
+#else
   push_svalue(m->ind->item + (my_rand() % m->ind->size));
+#endif
   stack_swap();
   pop_stack();
 }
 
-PIKEFUN mapping random(mapping m)
+PIKEFUN array random(mapping m)
 {
   struct mapping_data *md=m->data;
   size_t bucket, count;
diff --git a/src/builtin_functions.c b/src/builtin_functions.c
index 351a323f3df22a5c851d25cb5de9210c4ca21847..97f07e87eb03167742b48e68f008d93258f9f942 100644
--- a/src/builtin_functions.c
+++ b/src/builtin_functions.c
@@ -5,7 +5,7 @@
 \*/
 /**/
 #include "global.h"
-RCSID("$Id: builtin_functions.c,v 1.415 2001/12/03 15:46:54 grubba Exp $");
+RCSID("$Id: builtin_functions.c,v 1.416 2001/12/10 02:08:12 mast Exp $");
 #include "interpret.h"
 #include "svalue.h"
 #include "pike_macros.h"
@@ -2167,7 +2167,11 @@ PMOD_EXPORT void f_indices(INT32 args)
     break;
 
   case T_MULTISET:
+#ifdef PIKE_NEW_MULTISETS
+    a = multiset_indices (Pike_sp[-args].u.multiset);
+#else
     a=copy_array(Pike_sp[-args].u.multiset->ind);
+#endif
     break;
 
   case T_OBJECT:
@@ -2479,6 +2483,9 @@ PMOD_EXPORT void f_values(INT32 args)
     break;
 
   case T_MULTISET:
+#ifdef PIKE_NEW_MULTISETS
+    a = multiset_values (Pike_sp[-args].u.multiset);
+#else
     size=Pike_sp[-args].u.multiset->ind->size;
     a=allocate_array_no_init(size,0);
     while(--size>=0)
@@ -2487,6 +2494,7 @@ PMOD_EXPORT void f_values(INT32 args)
       ITEM(a)[size].subtype=NUMBER_NUMBER;
       ITEM(a)[size].u.integer=1;
     }
+#endif
     break;
 
   case T_OBJECT:
@@ -3199,9 +3207,15 @@ void f_set_weak_flag(INT32 args)
       mapping_set_flags(s->u.mapping, flags);
       break;
     case T_MULTISET:
+#ifdef PIKE_NEW_MULTISETS
+      flags = multiset_get_flags (s->u.multiset);
+      flags = (flags & ~PIKE_WEAK_BOTH) | (ret & PIKE_WEAK_BOTH);
+      multiset_set_flags (s->u.multiset, flags);
+#else
       flags = array_get_flags(s->u.multiset->ind);
       SETFLAG(flags,(ARRAY_WEAK_FLAG|ARRAY_WEAK_SHRINK), ret & PIKE_WEAK_INDICES);
       s->u.multiset->ind = array_set_flags(s->u.multiset->ind, flags);
+#endif
       break;
     default:
       SIMPLE_BAD_ARG_ERROR("set_weak_flag",1,"array|mapping|multiset");
@@ -6523,10 +6537,18 @@ PMOD_EXPORT void f_map(INT32 args)
 	 dmalloc_touch_svalue(Pike_sp);
 	 Pike_sp[-args]=Pike_sp[0];           /* move it back */
 	 f_map(args);               
+#ifdef PIKE_NEW_MULTISETS
+	 /* FIXME: Handle multisets with values like mappings. */
+	 push_multiset (mkmultiset_2 (Pike_sp[-1].u.array, NULL, NULL));
+	 free_array (Pike_sp[-2].u.array);
+	 Pike_sp[-2] = Pike_sp[-1];
+	 Pike_sp--;
+#else
 	 Pike_sp--;                      /* allocate_multiset is destructive */
 	 dmalloc_touch_svalue(Pike_sp);
 	 push_multiset(allocate_multiset(Pike_sp->u.array));
 	 order_multiset(sp[-1].u.multiset);
+#endif
 	 return;
 
       case T_STRING:
@@ -6913,11 +6935,19 @@ PMOD_EXPORT void f_filter(INT32 args)
 	 Pike_sp--;                       
 	 dmalloc_touch_svalue(Pike_sp);
 	 Pike_sp[-args]=Pike_sp[0];           /* move it back */
-	 f_filter(args);               
+	 f_filter(args);
+#ifdef PIKE_NEW_MULTISETS
+	 /* FIXME: Handle multisets with values like mappings. */
+	 push_multiset (mkmultiset_2 (Pike_sp[-1].u.array, NULL, NULL));
+	 free_array (Pike_sp[-2].u.array);
+	 Pike_sp[-2] = Pike_sp[-1];
+	 Pike_sp--;
+#else
 	 Pike_sp--;                      /* allocate_multiset is destructive */
 	 dmalloc_touch_svalue(Pike_sp);
 	 push_multiset(allocate_multiset(Pike_sp->u.array));
 	 order_multiset(sp[-1].u.multiset);
+#endif
 	 return;
 
       case T_STRING:
diff --git a/src/configure.in b/src/configure.in
index 1384e133a07f2fcb985eedd40f512491986c3527..f1957756938f6cbeb9c2ad0772b5cd7479aef827 100644
--- a/src/configure.in
+++ b/src/configure.in
@@ -1,4 +1,4 @@
-AC_REVISION("$Id: configure.in,v 1.561 2001/12/07 12:01:07 grubba Exp $")
+AC_REVISION("$Id: configure.in,v 1.562 2001/12/10 02:08:13 mast Exp $")
 AC_INIT(interpret.c)
 AC_CONFIG_HEADER(machine.h)
 
@@ -1087,6 +1087,11 @@ MY_AC_ARG_WITH(lock,
 	  [enable experimental code for multicpu machines (EXPERIMENTAL).]),
 		[],[AC_DEFINE(PIKE_RUN_UNLOCKED)])
 
+MY_AC_ARG_WITH(new-multisets,
+	       MY_DESCR([--with-new-multisets],
+			[enable new multiset implementation (EXPERIMENTAL).]),
+	       [AC_DEFINE(PIKE_NEW_MULTISETS)],[])
+
 # This makes configure default to --without-Perl
 # Remove this when the Perl module stops crashing and hanging. -Hubbe
 AC_ARG_WITH(perl, MY_DESCR([--with-perl],
diff --git a/src/docode.c b/src/docode.c
index c0be94aeacb3fe2857f3f02feff7a2dc202dd202..45cd23a29ce2dfac0e0fde3784719479f878424d 100644
--- a/src/docode.c
+++ b/src/docode.c
@@ -5,7 +5,7 @@
 \*/
 /**/
 #include "global.h"
-RCSID("$Id: docode.c,v 1.136 2001/10/05 22:55:32 hubbe Exp $");
+RCSID("$Id: docode.c,v 1.137 2001/12/10 02:08:13 mast Exp $");
 #include "las.h"
 #include "program.h"
 #include "pike_types.h"
@@ -2073,8 +2073,8 @@ static int do_docode2(node *n, INT16 flags)
 	      break;
 
 	    case T_MULTISET:
-	      array_fix_type_field(n->u.sval.u.multiset->ind);
-	      if(n->u.sval.u.multiset->ind-> type_field & BIT_COMPLEX)
+	      multiset_fix_type_field(n->u.sval.u.multiset);
+	      if(multiset_ind_types(n->u.sval.u.multiset) & BIT_COMPLEX)
 		emit0(F_COPY_VALUE);
 	      break;
 	  }
diff --git a/src/encode.c b/src/encode.c
index 2d8d69e2ed092a6d8dc5036d1a914d4202a2c814..256953790c108fc3474fca16f51ae58fc3113eb9 100644
--- a/src/encode.c
+++ b/src/encode.c
@@ -25,7 +25,7 @@
 #include "version.h"
 #include "bignum.h"
 
-RCSID("$Id: encode.c,v 1.132 2001/11/10 17:22:40 mast Exp $");
+RCSID("$Id: encode.c,v 1.133 2001/12/10 02:08:14 mast Exp $");
 
 /* #define ENCODE_DEBUG */
 
@@ -775,31 +775,57 @@ static void encode_value2(struct svalue *val, struct encode_data *data)
       pop_n_elems(2);
       break;
 
-    case T_MULTISET:
-      code_entry(TAG_MULTISET, val->u.multiset->ind->size,data);
-      if (data->canonic) {
-	INT32 *order;
-	if (val->u.multiset->ind->type_field & ~(BIT_BASIC & ~BIT_TYPE)) {
-	  array_fix_type_field(val->u.multiset->ind);
-	  if (val->u.multiset->ind->type_field & ~(BIT_BASIC & ~BIT_TYPE))
-	    /* This doesn't let bignums through. That's necessary as
-	     * long as they aren't handled deterministically by the
-	     * sort function. */
-	    Pike_error("Canonical encoding requires basic types in indices.\n");
+    case T_MULTISET: {
+      struct multiset *l = val->u.multiset;
+
+#ifdef PIKE_NEW_MULTISETS
+      if (multiset_indval (l) || multiset_get_cmp_less (l)->type != T_INT)
+	Pike_error ("FIXME: Encoding of multisets with values and/or "
+		    "custom sort function not yet implemented.\n");
+      else {
+	/* Encode valueless multisets without compare functions in a
+	 * compatible way. */
+#endif
+	code_entry(TAG_MULTISET, multiset_sizeof (l), data);
+	if (data->canonic) {
+	  INT32 *order;
+	  if (multiset_ind_types(l) & ~(BIT_BASIC & ~BIT_TYPE)) {
+	    multiset_fix_type_field(l);
+	    if (multiset_ind_types(l) & ~(BIT_BASIC & ~BIT_TYPE))
+	      /* This doesn't let bignums through. That's necessary as
+	       * long as they aren't handled deterministically by the
+	       * sort function. */
+	      Pike_error("Canonical encoding requires basic types in indices.\n");
+	  }
+	  check_stack(1);
+#ifdef PIKE_NEW_MULTISETS
+	  push_array(multiset_indices(l));
+#else
+	  push_array(copy_array(l->ind));
+#endif
+	  order = get_switch_order(Pike_sp[-1].u.array);
+	  order_array(Pike_sp[-1].u.array, order);
+	  free((char *) order);
+	  for (i = 0; i < Pike_sp[-1].u.array->size; i++)
+	    encode_value2(ITEM(Pike_sp[-1].u.array)+i, data);
+	  pop_stack();
 	}
-	check_stack(1);
-	ref_push_array(val->u.multiset->ind);
-	order = get_switch_order(Pike_sp[-1].u.array);
-	order_array(Pike_sp[-1].u.array, order);
-	free((char *) order);
-	for (i = 0; i < Pike_sp[-1].u.array->size; i++)
-	  encode_value2(ITEM(Pike_sp[-1].u.array)+i, data);
-	pop_stack();
+	else {
+#ifdef PIKE_NEW_MULTISETS
+	  struct svalue ind;
+	  union msnode *node = low_multiset_first (l->msd);
+	  for (; node; node = low_multiset_next (node))
+	    encode_value2 (low_use_multiset_index (node, ind), data);
+#else
+	  for(i=0; i<l->ind->size; i++)
+	    encode_value2(ITEM(l->ind)+i, data);
+#endif
+	}
+#ifdef PIKE_NEW_MULTISETS
       }
-      else
-	for(i=0; i<val->u.multiset->ind->size; i++)
-	  encode_value2(ITEM(val->u.multiset->ind)+i, data);
+#endif
       break;
+    }
 
     case T_OBJECT:
       check_stack(1);
@@ -1924,9 +1950,16 @@ static void decode_value2(struct decode_data *data)
 
       EDB(2,fprintf(stderr, "%*sDecoding multiset of size %d to <%d>\n",
 		  data->depth, "", num, data->counter.u.integer));
+#ifdef PIKE_NEW_MULTISETS
+      SETUP_DECODE_MEMOBJ (T_MULTISET, multiset, m,
+			   allocate_multiset (0, 0, NULL), ;);
+      /* FIXME: This array could be avoided by building the multiset directly. */
+      a = low_allocate_array (num, 0);
+#else
       SETUP_DECODE_MEMOBJ(T_MULTISET, multiset, m,
 			  allocate_multiset(low_allocate_array(num, 0)), ;);
       a=m->ind;
+#endif
 
       for(e=0;e<num;e++)
       {
@@ -1936,7 +1969,17 @@ static void decode_value2(struct decode_data *data)
 	dmalloc_touch_svalue(sp);
       }
       array_fix_type_field(a);
+#ifdef PIKE_NEW_MULTISETS
+      {
+	struct multiset *l = mkmultiset (a);
+	free_array (a);
+	/* This special case is handled efficiently by merge_multisets. */
+	merge_multisets (m, l, PIKE_MERGE_DESTR_A | PIKE_ARRAY_OP_ADD);
+	free_multiset (l);
+      }
+#else
       order_multiset(m);
+#endif
       ref_push_multiset(m);
 #ifdef ENCODE_DEBUG
       data->depth -= 2;
diff --git a/src/gc.c b/src/gc.c
index 94489b6a552b0f9cf44b26896ffde9c8d5268091..11e46a0626c034a03059a16a117c81daf358afbd 100644
--- a/src/gc.c
+++ b/src/gc.c
@@ -30,7 +30,7 @@ struct callback *gc_evaluator_callback=0;
 
 #include "block_alloc.h"
 
-RCSID("$Id: gc.c,v 1.177 2001/09/24 14:36:51 grubba Exp $");
+RCSID("$Id: gc.c,v 1.178 2001/12/10 02:08:14 mast Exp $");
 
 /* Run garbage collect approximately every time
  * 20 percent of all arrays, objects and programs is
@@ -302,6 +302,10 @@ int attempt_to_identify(void *something, void **inblock)
   for(mu=first_multiset;mu;mu=mu->next)
     if(mu==(struct multiset *)something)
       return T_MULTISET;
+#ifdef PIKE_NEW_MULTISETS
+    else if (mu->msd == (struct multiset_data *) something)
+      return T_MULTISET_DATA;
+#endif
 
   if(safe_debug_findstring((struct pike_string *)something))
     return T_STRING;
@@ -502,9 +506,39 @@ void describe_location(void *real_memblock,
       fprintf(stderr, "%*s  **In storage of object\n", indent, "");
       break;
 
+#ifdef PIKE_NEW_MULTISETS
+    case T_MULTISET:
+      descblock = ((struct multiset *) memblock)->msd;
+      /* FALL THROUGH */
+
+    case T_MULTISET_DATA: {
+      struct multiset_data *msd = (struct multiset_data *) descblock;
+      union msnode *node = low_multiset_first (msd);
+      struct svalue ind;
+      int indval = msd->flags & MULTISET_INDVAL;
+      for (; node; node = low_multiset_next (node)) {
+	if (&node->i.ind == (struct svalue *) location) {
+	  fprintf (stderr, "%*s  **In index ", indent, "");
+	  print_svalue (stderr, low_use_multiset_index (node, ind));
+	  fputc ('\n', stderr);
+	  break;
+	}
+	else if (indval && &node->iv.val == (struct svalue *) location) {
+	  fprintf(stderr, "%*s  **In value with index ", indent, "");
+	  print_svalue(stderr, low_use_multiset_index (node, ind));
+	  fputc('\n', stderr);
+	  break;
+	}
+      }
+      break;
+    }
+
+#else  /* PIKE_NEW_MULTISETS */
     case T_MULTISET:
       descblock = ((struct multiset *) memblock)->ind;
       /* FALL THROUGH */
+#endif
+
     case T_ARRAY:
     {
       struct array *a=(struct array *)descblock;
@@ -874,10 +908,32 @@ again:
       break;
     }
 
+#ifdef PIKE_NEW_MULTISETS
+    case T_MULTISET_DATA: {
+      struct multiset *l;
+      for (l = first_multiset; l; l = l->next) {
+	if (l->msd == (struct multiset_data *) a) {
+	  fprintf(stderr, "%*s**Describing multiset for this data block:\n", indent, "");
+	  debug_dump_multiset(l);
+	}
+      }
+      break;
+    }
+
+    case T_MULTISET:
+      fprintf(stderr, "%*s**Describing multiset:\n", indent, "");
+      debug_dump_multiset((struct multiset *) a);
+      fprintf(stderr, "%*s**Describing multiset data block:\n", indent, "");
+      describe_something(((struct multiset *) a)->msd, T_MULTISET_DATA,
+			 indent + 2, -1, flags, 0);
+      break;
+
+#else  /* PIKE_NEW_MULTISETS */
     case T_MULTISET:
       fprintf(stderr,"%*s**Describing array of multiset:\n",indent,"");
       debug_dump_array(((struct multiset *)a)->ind);
       break;
+#endif
 
     case T_ARRAY:
       fprintf(stderr,"%*s**Describing array:\n",indent,"");
@@ -2529,7 +2585,11 @@ int do_gc(void)
      * things. */
     gc_zap_ext_weak_refs_in_mappings();
     gc_zap_ext_weak_refs_in_arrays();
+#ifdef PIKE_NEW_MULTISETS
+    gc_zap_ext_weak_refs_in_multisets();
+#else
     /* Multisets handled as arrays. */
+#endif
     gc_zap_ext_weak_refs_in_objects();
     gc_zap_ext_weak_refs_in_programs();
     GC_VERBOSE_DO(
diff --git a/src/interpret.c b/src/interpret.c
index 6e3235bd82bdd9912bf34f0b23be24a08853c45f..593710f5cf9b2d5b3a7290e6d7cca735354d5289 100644
--- a/src/interpret.c
+++ b/src/interpret.c
@@ -5,7 +5,7 @@
 \*/
 /**/
 #include "global.h"
-RCSID("$Id: interpret.c,v 1.252 2001/11/10 19:43:51 mast Exp $");
+RCSID("$Id: interpret.c,v 1.253 2001/12/10 02:08:14 mast Exp $");
 #include "interpret.h"
 #include "object.h"
 #include "program.h"
@@ -1990,7 +1990,7 @@ void gdb_backtrace (
 	    break;
 
 	  case T_MULTISET:
-	    fprintf (stderr, "multiset[%ld]", (long) arg->u.multiset->ind->size);
+	    fprintf (stderr, "multiset[%ld]", (long) multiset_sizeof (arg->u.multiset));
 	    break;
 
 	  case T_MAPPING:
diff --git a/src/iterators.cmod b/src/iterators.cmod
index b7608ee57ab296edfc21b583ccac5c7b25dcb4ae..36dc466b62e75f0c319442c7a608d3de999863bd 100644
--- a/src/iterators.cmod
+++ b/src/iterators.cmod
@@ -1,11 +1,11 @@
-/*\
+/*\ -*- c -*-
 ||| This file a part of Pike, and is copyright by Fredrik Hubinette
 ||| Pike is distributed as GPL (General Public License)
 ||| See the files COPYING and DISCLAIMER for more information.
 \*/
 /**/
 #include "global.h"
-RCSID("$Id: iterators.cmod,v 1.14 2001/09/24 16:47:27 grubba Exp $");
+RCSID("$Id: iterators.cmod,v 1.15 2001/12/10 02:08:15 mast Exp $");
 #include "main.h"
 #include "object.h"
 #include "mapping.h"
@@ -251,6 +251,197 @@ PIKECLASS array_iterator
     
 };
 
+#ifdef PIKE_NEW_MULTISETS
+
+PIKECLASS multiset_iterator
+{
+  CVAR struct multiset *l;
+  CVAR int lock_index;
+
+#define MSNODE_BEFORE_BEG -2
+#define MSNODE_AFTER_END -1
+  CVAR ptrdiff_t nodepos;
+
+  PIKEFUN int value()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (THIS->nodepos < 0 || msnode_is_deleted (THIS->l, THIS->nodepos))
+	push_undefined();
+      else
+	push_multiset_value (THIS->l, THIS->nodepos);
+    }
+
+  PIKEFUN mixed index()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (THIS->nodepos < 0 || msnode_is_deleted (THIS->l, THIS->nodepos))
+	push_undefined();
+      else
+	push_multiset_index (THIS->l, THIS->nodepos);
+    }
+
+  static void li_step (struct multiset_iterator_struct *li, int steps)
+    {
+      ptrdiff_t newpos = li->nodepos;
+
+      switch (li->nodepos) {
+	case MSNODE_BEFORE_BEG:
+	  if (steps <= 0) return;
+	  newpos = multiset_first (li->l);
+	  steps--;
+	  break;
+	case MSNODE_AFTER_END:
+	  if (steps >= 0) return;
+	  newpos = multiset_last (li->l);
+	  steps++;
+	  break;
+	default:;
+      }
+
+      if (steps > 0)
+	do {
+	  newpos = multiset_next (li->l, newpos);
+	  if (newpos < 0) {
+	    sub_msnode_ref (li->l);
+	    li->nodepos = MSNODE_AFTER_END;
+	    return;
+	  }
+	} while (--steps);
+      else if (steps < 0)
+	do {
+	  newpos = multiset_prev (li->l, newpos);
+	  if (newpos < 0) {
+	    sub_msnode_ref (li->l);
+	    li->nodepos = MSNODE_BEFORE_BEG;
+	    return;
+	  }
+	} while (++steps);
+
+      li->nodepos = newpos;
+    }
+
+  PIKEFUN object `+ (int steps)
+    {
+      struct object *o;
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      o = low_clone (multiset_iterator_program);
+      add_ref (OBJ2_MULTISET_ITERATOR (o)->l = THIS->l);
+      if ((OBJ2_MULTISET_ITERATOR (o)->nodepos = THIS->nodepos) >= 0)
+	add_msnode_ref (OBJ2_MULTISET_ITERATOR (o)->l);
+      li_step (OBJ2_MULTISET_ITERATOR (o), steps);
+      RETURN o;
+    }
+
+  PIKEFUN object `+= (int steps)
+  {
+    if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+    li_step (THIS, steps);
+    REF_RETURN Pike_fp->current_object;
+  }
+
+  PIKEFUN int first()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (THIS->nodepos >= 0) sub_msnode_ref (THIS->l);
+      THIS->nodepos = multiset_first (THIS->l);
+      if (THIS->nodepos >= 0) RETURN 1;
+      THIS->nodepos = MSNODE_AFTER_END;
+      RETURN 0;
+    }
+
+  /* Hubbe: Should this really be destructive ??
+   * I let this question stand; I'm only adapting multiset_iterator. /mast */
+  PIKEFUN object _random()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (THIS->nodepos >= 0) {
+	sub_msnode_ref (THIS->l);
+	THIS->nodepos = MSNODE_BEFORE_BEG;
+      }
+      if (!multiset_is_empty (THIS->l))
+	THIS->nodepos =
+	  multiset_get_nth (THIS->l, my_rand() % multiset_sizeof (THIS->l));
+      REF_RETURN Pike_fp->current_object;
+    }
+
+  PIKEFUN int next()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (THIS->nodepos >= 0) {
+	THIS->nodepos = multiset_next (THIS->l, THIS->nodepos);
+	if (THIS->nodepos >= 0) RETURN 1;
+	THIS->nodepos = MSNODE_AFTER_END;
+	sub_msnode_ref (THIS->l);
+      }
+      RETURN 0;
+    }
+
+  PIKEFUN int `!()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      RETURN THIS->nodepos < 0 || msnode_is_deleted (THIS->l, THIS->nodepos);
+    }
+
+  PIKEFUN void lock_index()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (!THIS->lock_index) {
+	add_ref (THIS->l->msd);
+	THIS->l->msd->noval_refs++;
+	THIS->lock_index = 1;
+      }
+    }
+
+  PIKEFUN void unlock_index()
+    {
+      if (!THIS->l) Pike_error ("Iterator not initialized.\n");
+      if (THIS->lock_index) {
+	THIS->l->msd->noval_refs--;
+	sub_ref (THIS->l->msd);
+#ifdef PIKE_DEBUG
+	if (!THIS->l->msd->refs) fatal ("msd ran out of refs unexpectedly.\n");
+#endif
+	THIS->lock_index = 0;
+      }
+    }
+
+  /* FIXME: Add more functions, e.g. insert, delete, add. */
+
+  /* FIXME: Maybe the index should be locked when the iterator is used
+   * in foreach, to behave more like the mapping iterator. */
+
+  PIKEFUN void create (multiset l)
+    {
+      if (THIS->l) Pike_error ("Multiset iterators cannot be reused.\n");
+      add_ref (THIS->l = l);
+      THIS->nodepos = MSNODE_BEFORE_BEG;
+      THIS->lock_index = 0;
+    }
+
+  INIT 
+    {
+      THIS->l = NULL;
+    }
+
+  EXIT
+    {
+      if (THIS->l) {
+	if (THIS->nodepos >= 0) sub_msnode_ref (THIS->l);
+	if (THIS->lock_index) {
+	  THIS->l->msd->noval_refs--;
+	  sub_ref (THIS->l->msd);
+#ifdef PIKE_DEBUG
+	  if (!THIS->l->msd->refs) fatal ("msd ran out of refs unexpectedly.\n");
+#endif
+	}
+	free_multiset (THIS->l);
+	THIS->l = NULL;
+      }
+    }
+};
+
+#else  /* PIKE_NEW_MULTISETS */
+
 PIKECLASS multiset_iterator
 {
   CVAR int pos;
@@ -280,7 +471,7 @@ PIKECLASS multiset_iterator
 
   PIKEFUN object `+(int steps)
     {
-      struct object *o=low_clone(array_iterator_program);
+      struct object *o=low_clone(multiset_iterator_program);
       OBJ2_MULTISET_ITERATOR(o)[0]=*THIS;
       add_ref(THIS->a);
       OBJ2_MULTISET_ITERATOR(o)->pos+=steps;
@@ -323,7 +514,7 @@ PIKECLASS multiset_iterator
   PIKEFUN void create(multiset m)
     {
       if(THIS->a)
-	Pike_error("Array iterators cannot be reused.\n");
+	Pike_error("Multiset iterators cannot be reused.\n");
       
       add_ref(THIS->a=m->ind);
     }
@@ -341,6 +532,8 @@ PIKECLASS multiset_iterator
     
 };
 
+#endif	/* PIKE_NEW_MULTISETS */
+
 PIKECLASS string_iterator
 {
   CVAR int pos;
@@ -1029,7 +1222,12 @@ PIKECLASS string_split_iterator
 	if (split_set->type == T_ARRAY) {
 	  a = split_set->u.array;
 	} else if (split_set->type == T_MULTISET) {
+#ifdef PIKE_NEW_MULTISETS
+	  a = multiset_indices (split_set->u.multiset);
+	  push_array (a);
+#else
 	  a = split_set->u.multiset->ind;
+#endif
 	} else {
 	  SIMPLE_BAD_ARG_ERROR("String.split", 2,
 			       "int|array(int)|multiset(int)");
@@ -1049,6 +1247,9 @@ PIKECLASS string_split_iterator
 	  THIS->split_set[i] = a->item[i].u.integer;
 	}
 	THIS->split_set_size = a->size;
+#ifdef PIKE_NEW_MULTISETS
+	if (split_set->type == T_MULTISET) pop_stack();
+#endif
       }
       add_ref(THIS->buffer = buffer);
       if (args > 2) {
@@ -1400,6 +1601,38 @@ int foreach_iterate(struct object *o)
     } else if(o->prog == multiset_iterator_program)
     {
       struct multiset_iterator_struct *i=OBJ2_MULTISET_ITERATOR(o);
+
+#ifdef PIKE_NEW_MULTISETS
+      struct svalue ind;
+
+      switch (i->nodepos) {
+	case MSNODE_BEFORE_BEG:
+	  i->nodepos = multiset_first (i->l);
+	  if (i->nodepos < 0) {
+	    i->nodepos = MSNODE_AFTER_END;
+	    return 0;
+	  }
+	  break;
+
+	default:
+	  i->nodepos = multiset_next (i->l, i->nodepos);
+	  if (i->nodepos >= 0) break;
+	  sub_msnode_ref (i->l);
+	  i->nodepos = MSNODE_AFTER_END;
+	  /* FALL THROUGH */
+
+	case MSNODE_AFTER_END:
+	  return 0;
+      }
+
+      if (Pike_sp[-4].type != T_INT)
+	assign_lvalue (Pike_sp - 4, use_multiset_index (i->l, i->nodepos, ind));
+      if (Pike_sp[-2].type != T_INT)
+	assign_lvalue (Pike_sp - 2, get_multiset_value (i->l, i->nodepos));
+
+      return 1;
+
+#else  /* PIKE_NEW_MULTISETS */
       if(i->pos < i->a->size)
       {
 	if(Pike_sp[-4].type != T_INT)
@@ -1417,6 +1650,8 @@ int foreach_iterate(struct object *o)
       }else{
 	return 0;
       }
+#endif	/* PIKE_NEW_MULTISETS */
+
     } else if(o->prog == string_iterator_program)
     {
       struct string_iterator_struct *i=OBJ2_STRING_ITERATOR(o);
diff --git a/src/main.c b/src/main.c
index 334897703a8c281b71ea1f495a2110b48d9be4e5..94209eff28555db6803c9eb59baa90d2653d02a6 100644
--- a/src/main.c
+++ b/src/main.c
@@ -5,7 +5,7 @@
 \*/
 /**/
 #include "global.h"
-RCSID("$Id: main.c,v 1.137 2001/09/24 14:41:37 grubba Exp $");
+RCSID("$Id: main.c,v 1.138 2001/12/10 02:08:15 mast Exp $");
 #include "fdlib.h"
 #include "backend.h"
 #include "module.h"
@@ -235,7 +235,9 @@ int dbm_main(int argc, char **argv)
     init_node_s_blocks();
     init_object_blocks();
     init_callback_blocks();
-    init_rbtree();
+#ifdef PIKE_NEW_MULTISETS
+    init_multiset();
+#endif
   }
 
 #ifdef SHARED_NODES
@@ -668,6 +670,11 @@ int dbm_main(int argc, char **argv)
   call_callback(& post_master_callbacks, 0);
   free_callback_list(& post_master_callbacks);
 
+#ifdef TEST_MULTISET
+  /* A C-level testsuite for the low level stuff in multisets. */
+  test_multiset();
+#endif
+
   TRACE((stderr, "Call master->_main()...\n"));
   
   if(SETJMP(back))
@@ -943,7 +950,9 @@ void low_exit_main(void)
   free_all_object_blocks();
   first_program=0;
   free_all_program_blocks();
-  exit_rbtree();
+#ifdef PIKE_NEW_MULTISETS
+  exit_multiset();
+#endif
 #endif
 }
 
diff --git a/src/multiset.c b/src/multiset.c
index 8588c2dbb04348c0cf842d19f8eebb4d5ab03a93..117b73237fbfc4f29128b941fbb6b52eaa749022 100644
--- a/src/multiset.c
+++ b/src/multiset.c
@@ -1,9 +1,5156 @@
+#include "global.h"
+
+#ifdef PIKE_NEW_MULTISETS
+
+/* Multisets using rbtree.
+ *
+ * Created by Martin Stjernholm 2001-05-07
+ */
+
+RCSID("$Id: multiset.c,v 1.40 2001/12/10 02:08:16 mast Exp $");
+
+#include "builtin_functions.h"
+#include "gc.h"
+#include "interpret.h"
+#include "multiset.h"
+#include "object.h"
+#include "opcodes.h"
+#include "pike_error.h"
+#include "rbtree_low.h"
+#include "security.h"
+#include "svalue.h"
+
+#include "block_alloc.h"
+
+#include <assert.h>
+
+/* The following defines the allocation policy. It's almost the same
+ * as for mappings. */
+#define ALLOC_SIZE(size) ((size) ? (size) + 4 : 0)
+#define ENLARGE_SIZE(size) (((size) << 1) + 4)
+#define DO_SHRINK(msd, extra) ((((msd)->size + extra) << 2) + 4 <= (msd)->allocsize)
+
+#if defined (PIKE_DEBUG) || defined (TEST_MULTISET)
+static void debug_dump_ind_data (struct msnode_ind *node,
+				 struct multiset_data *msd);
+static void debug_dump_indval_data (struct msnode_indval *node,
+				    struct multiset_data *msd);
+DECLSPEC(noreturn) static void debug_multiset_fatal (
+  struct multiset *l, const char *fmt, ...) ATTRIBUTE((noreturn, format (printf, 2, 3)));
+#define multiset_fatal (fprintf (stderr, "%s:%d: Fatal in multiset: ", \
+				 __FILE__, __LINE__), debug_multiset_fatal)
+#endif
+
+#ifdef PIKE_DEBUG
+
+/* To get good type checking. */
+static inline union msnode **msnode_ptr_check (union msnode **x)
+  {return x;}
+static inline struct msnode_ind *msnode_ind_check (struct msnode_ind *x)
+  {return x;}
+static inline struct msnode_indval *msnode_indval_check (struct msnode_indval *x)
+  {return x;}
+
+#define sub_extra_ref(X) do {						\
+    if (!sub_ref (X)) fatal ("Got zero refs to " #X " unexpectedly.\n"); \
+  } while (0)
+
+#else
+
+#define msnode_ptr_check(X) ((union msnode **) (X))
+#define msnode_ind_check(X) ((struct msnode_ind *) (X))
+#define msnode_indval_check(X) ((struct msnode_indval *) (X))
+
+#define sub_extra_ref(X) do {sub_ref (X);} while (0)
+
+#endif
+
+/* #define MULTISET_ALLOC_DEBUG */
+#ifdef MULTISET_ALLOC_DEBUG
+#define ALLOC_TRACE(X) X
+#else
+#define ALLOC_TRACE(X)
+#endif
+
+/* #define MULTISET_CMP_DEBUG */
+#if defined (MULTISET_CMP_DEBUG) && defined (PIKE_DEBUG)
+
+#define INTERNAL_CMP(A, B, CMP_RES) do {				\
+    struct svalue *_cmp_a_ = (A);					\
+    struct svalue *_cmp_b_ = (B);					\
+    int _cmp_res_;							\
+    if (t_flag) {							\
+      fputs ("internal cmp ", stderr);					\
+      print_svalue (stderr, _cmp_a_);					\
+      fputs (" <=> ", stderr);						\
+      print_svalue (stderr, _cmp_b_);					\
+      fputs (": ", stderr);						\
+    }									\
+    _cmp_res_ = (CMP_RES) = set_svalue_cmpfun (_cmp_a_, _cmp_b_);	\
+    if (t_flag)								\
+      fprintf (stderr, "%d\n", _cmp_res_);				\
+  } while (0)
+
+#define EXTERNAL_CMP(CMP_LESS) do {					\
+    if (t_flag) {							\
+      fputs ("external cmp ", stderr);					\
+      print_svalue (stderr, sp - 2);					\
+      fputs (" <=> ", stderr);						\
+      print_svalue (stderr, sp - 1);					\
+      fputs (": ", stderr);						\
+    }									\
+    apply_svalue (CMP_LESS, 2);						\
+    if (t_flag) {							\
+      print_svalue (stderr, sp - 1);					\
+      fputc ('\n', stderr);						\
+    }									\
+  } while (0)
+
+#else
+
+#define INTERNAL_CMP(A, B, CMP_RES) do {				\
+    (CMP_RES) = set_svalue_cmpfun (A, B);				\
+  } while (0)
+
+#define EXTERNAL_CMP(CMP_LESS) do {					\
+    apply_svalue (CMP_LESS, 2);						\
+  } while (0)
+
+#endif
+
+#define SAME_CMP_LESS(MSD_A, MSD_B)					\
+  ((MSD_A)->cmp_less.type == T_INT ?					\
+   (MSD_B)->cmp_less.type == T_INT :					\
+   is_identical (&(MSD_A)->cmp_less, &(MSD_B)->cmp_less))
+
+#define HDR(NODE) ((struct rb_node_hdr *) msnode_check (NODE))
+#define PHDR(NODEPTR) ((struct rb_node_hdr **) msnode_ptr_check (NODEPTR))
+#define RBNODE(NODE) ((union msnode *) rb_node_check (NODE))
+#define INODE(NODE) ((union msnode *) msnode_ind_check (NODE))
+#define IVNODE(NODE) ((union msnode *) msnode_indval_check (NODE))
+
+#define NEXT_FREE(NODE) INODE (msnode_check (NODE)->i.next)
+#define SET_NEXT_FREE(NODE, NEXT)					\
+  (msnode_check (NODE)->i.next = (struct msnode_ind *) msnode_check (NEXT))
+
+#define DELETED_PREV(NODE) INODE (msnode_check (NODE)->i.prev)
+#define DELETED_NEXT(NODE) ((union msnode *) msnode_check (NODE)->i.ind.u.ptr)
+
+#define NODE_AT(MSD, TYPE, POS) ((struct TYPE *) &(MSD)->nodes + (POS))
+#define NODE_OFFSET(TYPE, POS)						\
+  ((size_t) NODE_AT ((struct multiset_data *) NULL, TYPE, POS))
+
+#define SHIFT_PTR(PTR, FROM, TO) ((char *) (PTR) - (char *) (FROM) + (char *) (TO))
+#define SHIFT_NODEPTR(NODEPTR, FROM_MSD, TO_MSD)			\
+  ((union msnode *) SHIFT_PTR (msnode_check (NODEPTR), FROM_MSD, TO_MSD))
+#define SHIFT_HDRPTR(HDRPTR, FROM_MSD, TO_MSD)				\
+  ((struct rb_node_hdr *) SHIFT_PTR (rb_node_check (HDRPTR), FROM_MSD, TO_MSD))
+
+#define COPY_NODE_PTRS(OLD, OLDBASE, NEW, NEWBASE, TYPE) do {		\
+    (NEW)->prev = (OLD)->prev ?						\
+      (struct TYPE *) SHIFT_PTR ((OLD)->prev, OLDBASE, NEWBASE) : NULL;	\
+    (NEW)->next = (OLD)->next ?						\
+      (struct TYPE *) SHIFT_PTR ((OLD)->next, OLDBASE, NEWBASE) : NULL;	\
+  } while (0)
+
+#define COPY_DELETED_PTRS_EXTRA(OLD, OLDBASE, NEW, NEWBASE) do {	\
+    (NEW)->ind.u.ptr = (OLD)->ind.u.ptr ?				\
+      SHIFT_PTR ((OLD)->ind.u.ptr, OLDBASE, NEWBASE) : NULL;		\
+  } while (0)
+
+#define COPY_NODE_IND(OLD, NEW, TYPE) do {				\
+    (NEW)->ind = (OLD)->ind;						\
+    (NEW)->ind.type &= ~MULTISET_FLAG_MASK;				\
+    add_ref_svalue (&(NEW)->ind);					\
+    (NEW)->ind.type = (OLD)->ind.type;					\
+  } while (0)
+
+#define EXPAND_ARG(X) X
+#define IGNORE_ARG(X)
+
+#define DO_WITH_NODES(MSD) do {						\
+    if ((MSD)->flags & MULTISET_INDVAL) {				\
+      WITH_NODES_BLOCK (msnode_indval, msnode_ind, IGNORE_ARG, EXPAND_ARG); \
+    }									\
+    else {								\
+      WITH_NODES_BLOCK (msnode_ind, msnode_indval, EXPAND_ARG, IGNORE_ARG); \
+    }									\
+  } while (0)
+
+struct multiset *first_multiset = NULL;
+struct multiset *gc_internal_multiset = NULL;
+static struct multiset *gc_mark_multiset_pos = NULL;
+
+static struct multiset_data empty_ind_msd = {
+  1, 0, NULL, NULL,
+  {T_INT, 0,
+#ifdef HAVE_UNION_INIT
+   {0}
+#endif
+  },
+  0, 0, 0,
+  BIT_INT,
+  0,
+#ifdef HAVE_UNION_INIT
+  {{{0, 0, {0, 0, {0}}}}}
+#endif
+};
+
+static struct multiset_data empty_indval_msd = {
+  1, 0, NULL, NULL,
+  {T_INT, 0,
+#ifdef HAVE_UNION_INIT
+   {0}
+#endif
+  },
+  0, 0, 0,
+  0,
+  MULTISET_INDVAL,
+#ifdef HAVE_UNION_INIT
+  {{{0, 0, {0, 0, {0}}}}}
+#endif
+};
+
+struct svalue svalue_int_one = {T_INT, NUMBER_NUMBER,
+#ifdef HAVE_UNION_INIT
+				{1}
+#endif
+			       };
+
+void free_multiset_data (struct multiset_data *msd);
+
+#define INIT_MULTISET(L) do {						\
+    GC_ALLOC (L);							\
+    INITIALIZE_PROT (L);						\
+    L->refs = 1;							\
+    L->node_refs = 0;							\
+    DOUBLELINK (first_multiset, L);					\
+  } while (0)
+
+#undef EXIT_BLOCK
+#define EXIT_BLOCK(L) do {						\
+    FREE_PROT (L);							\
+    DO_IF_DEBUG (							\
+      if (L->node_refs)							\
+	fatal ("Freeing multiset with %d node refs.\n", L->node_refs);	\
+      if (L->msd->refs <= 0)						\
+	fatal ("Too few refs %ld to multiset data.\n", L->msd->refs);	\
+      if (L->msd->noval_refs)						\
+	fatal ("Freeing multiset data with %d noval_refs.\n", L->msd->noval_refs); \
+    );									\
+    if (!sub_ref (L->msd)) free_multiset_data (L->msd);			\
+    DOUBLEUNLINK (first_multiset, L);					\
+    GC_FREE (L);							\
+  } while (0)
+
+#undef COUNT_OTHER
+#define COUNT_OTHER() do {						\
+    struct multiset *l;							\
+    double datasize = 0.0;						\
+    for (l = first_multiset; l; l = l->next)				\
+      datasize += (l->msd->flags & MULTISET_INDVAL ?			\
+		   NODE_OFFSET (msnode_indval, l->msd->allocsize) :	\
+		   NODE_OFFSET (msnode_ind, l->msd->allocsize)) /	\
+	(double) l->msd->refs;						\
+    size += (int) datasize;						\
+  } while (0)
+
+BLOCK_ALLOC (multiset, 511)
+
+/* Note: The returned block has no refs. */
+static struct multiset_data *low_alloc_multiset_data (int allocsize, INT16 flags)
+{
+  struct multiset_data *msd;
+
+#ifdef PIKE_DEBUG
+  if (allocsize < 0) fatal ("Invalid alloc size %d\n", allocsize);
+#endif
+
+  msd = (struct multiset_data *) xalloc (
+    flags & MULTISET_INDVAL ?
+    NODE_OFFSET (msnode_indval, allocsize) : NODE_OFFSET (msnode_ind, allocsize));
+  msd->refs = msd->noval_refs = 0;
+  msd->root = NULL;
+  msd->allocsize = allocsize;
+  msd->size = 0;
+  msd->ind_types = 0;
+  msd->val_types = flags & MULTISET_INDVAL ? 0 : BIT_INT;
+  msd->flags = flags;
+  msd->free_list = NULL;	/* Use fix_free_list to init this. */
+
+  ALLOC_TRACE (fprintf (stderr, "%p alloced size %d\n", msd, allocsize));
+  return msd;
+}
+
+struct tree_build_data
+{
+  union msnode *list;		/* List of nodes in msd linked with rb_next() */
+  union msnode *node;		/* If set, a single extra node in msd. */
+  struct multiset_data *msd;	/* Contains tree finished so far. */
+  struct multiset *l;		/* If set, a multiset with an extra
+				 * ref (containing another msd). */
+  struct multiset_data *msd2;	/* If set, yet another msd with an extra ref. */
+};
+
+static void free_tree_build_data (struct tree_build_data *build)
+{
+  union msnode *node, *next;
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+  if ((node = build->node)) {						\
+    node->i.ind.type &= ~MULTISET_FLAG_MASK;				\
+    free_svalue (&node->i.ind);						\
+    INDVAL (free_svalue (&node->iv.val));				\
+  }									\
+  if ((node = build->list))						\
+    do {								\
+      next = low_multiset_next (node);					\
+      node->i.ind.type &= ~MULTISET_FLAG_MASK;				\
+      free_svalue (&node->i.ind);					\
+      INDVAL (free_svalue (&node->iv.val));				\
+    } while ((node = next));
+
+  DO_WITH_NODES (build->msd);
+
+#undef WITH_NODES_BLOCK
+
+  free_multiset_data (build->msd);
+  if (build->l) free_multiset (build->l);
+  if (build->msd2 && !sub_ref (build->msd2)) free_multiset_data (build->msd2);
+}
+
+void free_multiset_data (struct multiset_data *msd)
+{
+  union msnode *node, *next;
+
+#ifdef PIKE_DEBUG
+  if (msd->refs)
+    fatal ("Attempt to free multiset_data with refs.\n");
+  if (msd->noval_refs)
+    fatal ("There are forgotten noval_refs.\n");
+#endif
+
+  /* We trust as few values as possible here, e.g. size and
+   * free_list are ignored. */
+
+  GC_FREE_BLOCK (msd);
+
+  free_svalue (&msd->cmp_less);
+
+  if ((node = low_multiset_first (msd))) {
+    /* Note: Can't check for MULTISET_FLAG_MARKER here; see e.g. the
+     * error recovery case in mkmultiset_2. */
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    do {								\
+      next = low_multiset_next (node);					\
+      node->i.ind.type &= ~MULTISET_FLAG_MASK;				\
+      free_svalue (&node->i.ind);					\
+      INDVAL (free_svalue (&node->iv.val));				\
+    } while ((node = next));
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+  }
+
+  ALLOC_TRACE (fprintf (stderr, "%p free\n", msd));
+  xfree (msd);
+}
+
+static void free_indirect_multiset_data (struct multiset_data **pmsd)
+{
+  if (*pmsd && !sub_ref (*pmsd)) free_multiset_data (*pmsd);
+}
+
+struct recovery_data
+{
+  struct multiset_data *a_msd;	/* If nonzero, it's freed by free_recovery_data */
+  struct multiset_data *b_msd;	/* If nonzero, it's freed by free_recovery_data */
+};
+
+static void free_recovery_data (struct recovery_data *rd)
+{
+  if (rd->a_msd && !sub_ref (rd->a_msd)) free_multiset_data (rd->a_msd);
+  if (rd->b_msd && !sub_ref (rd->b_msd)) free_multiset_data (rd->b_msd);
+}
+
+/* Links the nodes from and including first_free to the end of the
+ * node block onto (the beginning of) msd->free_list. */
+static void fix_free_list (struct multiset_data *msd, int first_free)
+{
+  int alloclast = msd->allocsize - 1;
+
+  if (first_free <= alloclast) {
+    union msnode *orig_free_list = msd->free_list;
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    msd->free_list = (union msnode *) NODE_AT (msd, TYPE, first_free);	\
+    for (; first_free < alloclast; first_free++) {			\
+      SET_NEXT_FREE ((union msnode *) NODE_AT (msd, TYPE, first_free),	\
+		     (union msnode *) NODE_AT (msd, TYPE, first_free + 1)); \
+      /* By setting prev to NULL we avoid shifting around garbage in */	\
+      /* COPY_NODE_PTRS. */						\
+      NODE_AT (msd, TYPE, first_free)->prev = NULL;			\
+      NODE_AT (msd, TYPE, first_free)->ind.type = PIKE_T_UNKNOWN;	\
+    }									\
+    SET_NEXT_FREE ((union msnode *) NODE_AT (msd, TYPE, first_free),	\
+		   orig_free_list);					\
+    NODE_AT (msd, TYPE, first_free)->prev = NULL;			\
+    NODE_AT (msd, TYPE, first_free)->ind.type = PIKE_T_UNKNOWN;
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+  }
+}
+
+#define CLEAR_DELETED_ON_FREE_LIST(MSD) do {				\
+    union msnode *node = (MSD)->free_list;				\
+    for (; node && node->i.ind.type == T_DELETED; node = NEXT_FREE (node)) { \
+      node->i.prev = NULL;						\
+      node->i.ind.type = PIKE_T_UNKNOWN;				\
+      (MSD)->size--;							\
+    }									\
+  } while (0)
+
+/* The copy has no refs. The copy is verbatim, i.e. the relative node
+ * positions are kept. */
+static struct multiset_data *copy_multiset_data (struct multiset_data *old)
+{
+  /* Note approximate code duplication in resize_multiset_data and
+   * multiset_set_flags. */
+
+  int pos = old->allocsize;
+  struct multiset_data *new = low_alloc_multiset_data (pos, old->flags);
+  assign_svalue_no_free (&new->cmp_less, &old->cmp_less);
+  new->ind_types = old->ind_types;
+  new->val_types = old->val_types;
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+  struct TYPE *onode, *nnode;						\
+  while (pos-- > 0) {							\
+    onode = NODE_AT (old, TYPE, pos);					\
+    nnode = NODE_AT (new, TYPE, pos);					\
+    COPY_NODE_PTRS (onode, old, nnode, new, TYPE);			\
+    switch (onode->ind.type) {						\
+      case T_DELETED:							\
+	COPY_DELETED_PTRS_EXTRA (onode, old, nnode, new);		\
+	/* FALL THROUGH */						\
+      case PIKE_T_UNKNOWN:						\
+	nnode->ind.type = onode->ind.type;				\
+	break;								\
+      default:								\
+	COPY_NODE_IND (onode, nnode, TYPE);				\
+	INDVAL (assign_svalue_no_free (&nnode->val, &onode->val));	\
+    }									\
+  }
+
+  DO_WITH_NODES (new);
+
+#undef WITH_NODES_BLOCK
+
+  if (old->free_list) new->free_list = SHIFT_NODEPTR (old->free_list, old, new);
+  if (old->root) new->root = SHIFT_NODEPTR (old->root, old, new);
+  new->size = old->size;
+
+  ALLOC_TRACE (fprintf (stderr, "%p -> %p: copied, alloc size %d, data size %d\n",
+			old, new, new->allocsize, new->size));
+  return new;
+}
+
+/* The first part of the new data block is a verbatim copy of the old
+ * one if verbatim is nonzero. This mode also handles link structures
+ * that aren't proper trees. If verbatim is zero, the tree is
+ * rebalanced, since the operation is already linear. The copy has no
+ * refs.
+ *
+ * The resize does not change the refs in referenced svalues, so the
+ * old block is always freed. The refs and noval_refs are transferred
+ * to the new block. */
+static struct multiset_data *resize_multiset_data (struct multiset_data *old,
+						   int newsize, int verbatim)
+{
+  struct multiset_data *new;
+
+#ifdef PIKE_DEBUG
+  if (old->refs > 1)
+    fatal ("Attempt to resize multiset_data with several refs.\n");
+  if (verbatim) {
+    if (newsize < old->allocsize)
+      fatal ("Cannot shrink multiset_data (from %d to %d) in verbatim mode.\n",
+	     old->allocsize, newsize);
+  }
+  else
+    if (newsize < old->size)
+      fatal ("Cannot resize multiset_data with %d elements to %d.\n",
+	     old->size, newsize);
+  if (newsize == old->allocsize)
+    fatal ("Unnecessary resize of multiset_data to same size.\n");
+#endif
+
+  /* Note approximate code duplication in copy_multiset_data and
+   * multiset_set_flags. */
+
+  new = low_alloc_multiset_data (newsize, old->flags);
+  dmalloc_touch_svalue (&old->cmp_less);
+  new->cmp_less = old->cmp_less;
+  dmalloc_touch_svalue (&new->cmp_less);
+  new->ind_types = old->ind_types;
+  new->val_types = old->val_types;
+
+  if (verbatim) {
+    int pos = old->allocsize;
+    fix_free_list (new, pos);
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    struct TYPE *oldnodes = (struct TYPE *) old->nodes;			\
+    struct TYPE *newnodes = (struct TYPE *) new->nodes;			\
+    struct TYPE *onode, *nnode;						\
+    while (pos-- > 0) {							\
+      onode = NODE_AT (old, TYPE, pos);					\
+      nnode = NODE_AT (new, TYPE, pos);					\
+      COPY_NODE_PTRS (onode, old, nnode, new, TYPE);			\
+      switch (onode->ind.type) {					\
+	case T_DELETED:							\
+	  COPY_DELETED_PTRS_EXTRA (onode, old, nnode, new);		\
+	  /* FALL THROUGH */						\
+	  case PIKE_T_UNKNOWN:						\
+	  nnode->ind.type = onode->ind.type;				\
+	  break;							\
+	default:							\
+	  nnode->ind = onode->ind;					\
+	  INDVAL (nnode->val = onode->val);				\
+      }									\
+    }
+
+    DO_WITH_NODES (new);
+
+#undef WITH_NODES_BLOCK
+
+    if (old->free_list) {
+      union msnode *list = SHIFT_NODEPTR (old->free_list, old, new);
+      union msnode *node, *next;
+      for (node = list; (next = NEXT_FREE (node)); node = next) {}
+      SET_NEXT_FREE (node, new->free_list);
+      new->free_list = list;
+    }
+    if (old->root) new->root = SHIFT_NODEPTR (old->root, old, new);
+    new->size = old->size;
+  }
+
+  else {
+    union msnode *oldnode;
+    if ((oldnode = low_multiset_first (old))) {
+      int pos = 0;
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+      struct TYPE *node;						\
+      while (1) {							\
+	node = NODE_AT (new, TYPE, pos);				\
+	node->ind = oldnode->i.ind;					\
+	INDVAL (node->val = oldnode->iv.val);				\
+	if (!(oldnode = low_multiset_next (oldnode))) break;		\
+	node->next = NODE_AT (new, TYPE, ++pos);			\
+      }									\
+      NODE_AT (new, TYPE, pos)->next = NULL;
+
+      DO_WITH_NODES (new);
+
+#undef WITH_NODES_BLOCK
+
+      new->size = ++pos;
+      fix_free_list (new, pos);
+      new->root = RBNODE (rb_make_tree (HDR (new->nodes), pos));
+    }
+    else
+      fix_free_list (new, 0);
+  }
+
+  ALLOC_TRACE (fprintf (stderr, "%p -> %p: resized from %d to %d, data size %d (%s)\n",
+			old, new, old->allocsize, new->allocsize, new->size,
+			verbatim ? "verbatim" : "rebalance"));
+
+  new->refs = old->refs;
+  new->noval_refs = old->noval_refs;
+
+  /* No longer contains any references to account for, thus a simple
+   * free. */
+  GC_FREE_SIMPLE_BLOCK (old);
+  xfree (old);
+
+  return new;
+}
+
+#define MOVE_MSD_REF(L, MSD) do {					\
+    sub_extra_ref (MSD);						\
+    add_ref ((MSD) = (L)->msd);						\
+  } while (0)
+
+#define MOVE_MSD_REF_AND_FREE(L, MSD) do {				\
+    if (!sub_ref (MSD)) free_multiset_data (MSD);			\
+    add_ref ((MSD) = (L)->msd);						\
+  } while (0)
+
+/* There are several occasions when we might get "inflated" msd
+ * blocks, i.e. ones that are larger than the allocation strategy
+ * allows. This happens e.g. when combining node references with
+ * shared data blocks, and when the gc removes nodes in shared data
+ * blocks. Therefore all the copy-on-write functions try to shrink
+ * them. */
+
+int prepare_for_change (struct multiset *l, int verbatim)
+{
+  struct multiset_data *msd = l->msd;
+  int msd_changed = 0;
+
+#ifdef PIKE_DEBUG
+  if (!verbatim && l->node_refs)
+    fatal ("The verbatim flag not set for multiset with node refs.\n");
+#endif
+
+  if (msd->refs > 1) {
+    l->msd = copy_multiset_data (msd);
+    MOVE_MSD_REF (l, msd);
+    msd_changed = 1;
+    if (!l->node_refs)
+      /* Look at l->node_refs and not verbatim here, since when
+       * verbatim is nonzero while l->node_refs is zero, we're only
+       * interested in keeping the tree structure for the allocated
+       * nodes. */
+      CLEAR_DELETED_ON_FREE_LIST (msd);
+  }
+
+  if (!verbatim && DO_SHRINK (msd, 0)) {
+#ifdef PIKE_DEBUG
+    if (d_flag > 1) check_multiset (l);
+#endif
+    l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size), 0);
+    msd_changed = 1;
+  }
+
+  return msd_changed;
+}
+
+int prepare_for_add (struct multiset *l, int verbatim)
+{
+  struct multiset_data *msd = l->msd;
+  int msd_changed = 0;
+
+#ifdef PIKE_DEBUG
+  if (!verbatim && l->node_refs)
+    fatal ("The verbatim flag not set for multiset with node refs.\n");
+#endif
+
+  if (msd->refs > 1) {
+    l->msd = copy_multiset_data (msd);
+    MOVE_MSD_REF (l, msd);
+    msd_changed = 1;
+    if (!l->node_refs) CLEAR_DELETED_ON_FREE_LIST (msd);
+  }
+
+  if (msd->size == msd->allocsize) {
+    /* Can't call check_multiset here, since it might not even be a
+     * proper tree in verbatim mode. */
+    l->msd = resize_multiset_data (msd, ENLARGE_SIZE (msd->allocsize), verbatim);
+    msd_changed = 1;
+  }
+  else if (!verbatim && DO_SHRINK (msd, 1)) {
+#ifdef PIKE_DEBUG
+    if (d_flag > 1) check_multiset (l);
+#endif
+    l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size + 1), 0);
+    msd_changed = 1;
+  }
+
+  return msd_changed;
+}
+
+int prepare_for_value_change (struct multiset *l, int verbatim)
+{
+  struct multiset_data *msd = l->msd;
+  int msd_changed = 0;
+
+#ifdef PIKE_DEBUG
+  if (!verbatim && l->node_refs)
+    fatal ("The verbatim flag not set for multiset with node refs.\n");
+#endif
+
+  /* Assume that the caller holds a value lock. */
+  if (msd->refs - msd->noval_refs > 1) {
+    l->msd = copy_multiset_data (msd);
+    MOVE_MSD_REF (l, msd);
+    msd_changed = 1;
+    if (!l->node_refs) CLEAR_DELETED_ON_FREE_LIST (msd);
+  }
+
+  if (!verbatim && DO_SHRINK (msd, 0)) {
+#ifdef PIKE_DEBUG
+    if (d_flag > 1) check_multiset (l);
+#endif
+    l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size), 0);
+    msd_changed = 1;
+  }
+
+  return msd_changed;
+}
+
+static union msnode *alloc_msnode_verbatim (struct multiset_data *msd)
+{
+  union msnode *node = msd->free_list;
+#ifdef PIKE_DEBUG
+  if (!node) fatal ("Verbatim multiset data block unexpectedly full.\n");
+#endif
+
+  if (node->i.ind.type == T_DELETED) {
+    union msnode *prev;
+    do {
+      prev = node;
+      node = NEXT_FREE (node);
+#ifdef PIKE_DEBUG
+      if (!node) fatal ("Verbatim multiset data block unexpectedly full.\n");
+#endif
+    } while (node->i.ind.type == T_DELETED);
+    SET_NEXT_FREE (prev, NEXT_FREE (node));
+  }
+  else
+    msd->free_list = NEXT_FREE (node);
+
+  msd->size++;
+  return node;
+}
+
+#define ALLOC_MSNODE(MSD, GOT_NODE_REFS, NODE) do {			\
+    if (GOT_NODE_REFS)							\
+      (NODE) = alloc_msnode_verbatim (MSD);				\
+    else {								\
+      (NODE) = (MSD)->free_list;					\
+      DO_IF_DEBUG (if (!(NODE)) fatal ("Multiset data block unexpectedly full.\n")); \
+      (MSD)->free_list = NEXT_FREE (NODE);				\
+      (MSD)->size++;							\
+    }									\
+  } while (0)
+
+#define ADD_TO_FREE_LIST(MSD, NODE) do {				\
+    SET_NEXT_FREE (NODE, (MSD)->free_list);				\
+    (MSD)->free_list = (NODE);						\
+  } while (0)
+
+static void unlink_msnode (struct multiset *l, struct rbstack_ptr *track,
+			   int keep_rbstack)
+{
+  struct multiset_data *msd = l->msd;
+  struct rbstack_ptr rbstack = *track;
+  union msnode *unlinked_node;
+
+  if (prepare_for_change (l, 1)) {
+    rbstack_shift (rbstack, HDR (msd->nodes), HDR (l->msd->nodes));
+    msd = l->msd;
+  }
+
+  /* Note: Similar code in gc_unlink_node_shared. */
+
+  if (l->node_refs) {
+    union msnode *prev, *next;
+    unlinked_node = RBNODE (RBSTACK_PEEK (rbstack));
+    prev = low_multiset_prev (unlinked_node);
+    next = low_multiset_next (unlinked_node);
+    low_rb_unlink_without_move (PHDR (&msd->root), &rbstack, keep_rbstack);
+    ADD_TO_FREE_LIST (msd, unlinked_node);
+    unlinked_node->i.ind.type = T_DELETED;
+    unlinked_node->i.prev = (struct msnode_ind *) prev;
+    unlinked_node->i.ind.u.ptr = next;
+  }
+
+  else {
+    unlinked_node =
+      RBNODE (low_rb_unlink_with_move (
+		PHDR (&msd->root), &rbstack, keep_rbstack,
+		msd->flags & MULTISET_INDVAL ?
+		sizeof (struct msnode_indval) : sizeof (struct msnode_ind)));
+    ADD_TO_FREE_LIST (msd, unlinked_node);
+    unlinked_node->i.ind.type = PIKE_T_UNKNOWN;
+    unlinked_node->i.prev = NULL;
+    msd->size--;
+    if (!keep_rbstack && DO_SHRINK (msd, 0)) {
+#ifdef PIKE_DEBUG
+      if (d_flag > 1) check_multiset (l);
+#endif
+      l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size), 0);
+    }
+  }
+
+  *track = rbstack;
+}
+
+PMOD_EXPORT void multiset_clear_node_refs (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  assert (!l->node_refs);
+  assert (msd->refs == 1);
+
+  CLEAR_DELETED_ON_FREE_LIST (msd);
+  if (DO_SHRINK (msd, 0)) {
+#ifdef PIKE_DEBUG
+    if (d_flag > 1) check_multiset (l);
+#endif
+    l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size), 0);
+  }
+}
+
+PMOD_EXPORT INT32 multiset_sizeof (struct multiset *l)
+{
+  INT32 size = l->msd->size;
+  union msnode *node = l->msd->free_list;
+  for (; node && node->i.ind.type == T_DELETED; node = NEXT_FREE (node))
+    size--;
+  return size;
+}
+
+PMOD_EXPORT struct multiset *allocate_multiset (int allocsize,
+						int flags,
+						struct svalue *cmp_less)
+{
+  struct multiset *l = alloc_multiset();
+
+#ifdef PIKE_DEBUG
+  if (cmp_less) check_svalue (cmp_less);
+#endif
+
+  if (allocsize || cmp_less || (flags & ~MULTISET_INDVAL)) {
+    l->msd = low_alloc_multiset_data (allocsize, flags);
+    add_ref (l->msd);
+    fix_free_list (l->msd, 0);
+    if (cmp_less) assign_svalue_no_free (&l->msd->cmp_less, cmp_less);
+    else l->msd->cmp_less.type = T_INT;
+  }
+  else {
+    l->msd = flags & MULTISET_INDVAL ? &empty_indval_msd : &empty_ind_msd;
+    add_ref (l->msd);
+  }
+
+  INIT_MULTISET (l);
+  return l;
+}
+
+PMOD_EXPORT void do_free_multiset (struct multiset *l)
+{
+  if (l) {
+    debug_malloc_touch (l);
+    debug_malloc_touch (l->msd);
+    free_multiset (l);
+  }
+}
+
+PMOD_EXPORT const char msg_multiset_no_node_refs[] = "Multiset got no node refs.\n";
+
+PMOD_EXPORT void do_sub_msnode_ref (struct multiset *l)
+{
+  if (l) {
+    debug_malloc_touch (l);
+    debug_malloc_touch (l->msd);
+    sub_msnode_ref (l);
+  }
+}
+
+enum find_types {
+  FIND_EQUAL,
+  FIND_NOEQUAL,
+  FIND_LESS,
+  FIND_GREATER,
+  FIND_NOROOT,
+  FIND_DESTRUCTED
+};
+
+static enum find_types low_multiset_find_le_gt (
+  struct multiset_data *msd, struct svalue *key, union msnode **found);
+static enum find_types low_multiset_find_lt_ge (
+  struct multiset_data *msd, struct svalue *key, union msnode **found);
+static enum find_types low_multiset_track_eq (
+  struct multiset_data *msd, struct svalue *key, struct rbstack_ptr *track);
+static enum find_types low_multiset_track_le_gt (
+  struct multiset_data *msd, struct svalue *key, struct rbstack_ptr *track);
+
+static void midflight_remove_node (struct multiset *l,
+				   struct multiset_data **pmsd,
+				   union msnode *node)
+{
+  /* If the node index is destructed, we could in principle ignore the
+   * copy-on-write here and remove it in all copies, but then we'd
+   * have to find another way than (l->msd != msd) to signal the tree
+   * change to the calling code. */
+  ONERROR uwp;
+  sub_ref (*pmsd);
+#ifdef PIKE_DEBUG
+  if (!(*pmsd)->refs) fatal ("Expected extra ref to passed msd.\n");
+#endif
+  *pmsd = NULL;
+  add_msnode_ref (l);
+  SET_ONERROR (uwp, do_sub_msnode_ref, l);
+  multiset_delete_node (l, MSNODE2OFF (l->msd, node));
+  UNSET_ONERROR (uwp);
+  add_ref (*pmsd = l->msd);
+}
+
+static void midflight_remove_node_fast (struct multiset *l,
+					struct rbstack_ptr *track,
+					int keep_rbstack)
+{
+  /* The note for midflight_remove_node applies here too. */
+  struct svalue ind, val;
+  union msnode *node = RBNODE (RBSTACK_PEEK (*track));
+  int indval = l->msd->flags & MULTISET_INDVAL;
+
+  /* Postpone free since the msd might be copied in unlink_node. */
+  low_use_multiset_index (node, ind);
+  if (indval) val = node->iv.val;
+
+  unlink_msnode (l, track, keep_rbstack);
+
+  free_svalue (&ind);
+  if (indval) free_svalue (&val);
+}
+
+/* Like midflight_remove_node_fast but doesn't bother with concurrent
+ * changes of the multiset or resizing of the msd. There must not be
+ * any node refs to it. */
+static void midflight_remove_node_faster (struct multiset_data *msd,
+					  struct rbstack_ptr *track)
+{
+  struct svalue ind;
+  union msnode *node = RBNODE (RBSTACK_PEEK (*track));
+
+  free_svalue (low_use_multiset_index (node, ind));
+  if (msd->flags & MULTISET_INDVAL) free_svalue (&node->iv.val);
+
+  node = RBNODE (low_rb_unlink_with_move (
+		   PHDR (&msd->root), track, 0,
+		   msd->flags & MULTISET_INDVAL ?
+		   sizeof (struct msnode_indval) : sizeof (struct msnode_ind)));
+  ADD_TO_FREE_LIST (msd, node);
+  msd->size--;
+  node->i.ind.type = PIKE_T_UNKNOWN;
+}
+
+PMOD_EXPORT void multiset_set_flags (struct multiset *l, int flags)
+{
+  struct multiset_data *old = l->msd;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (old);
+
+  if ((flags & MULTISET_INDVAL) == (old->flags & MULTISET_INDVAL)) {
+    if (flags != old->flags) {
+      prepare_for_change (l, l->node_refs);
+      l->msd->flags = flags;
+    }
+  }
+
+  else {
+    /* Almost like copy_multiset_data (and resize_multiset_data). */
+
+    int pos = old->allocsize;
+    struct multiset_data *new = low_alloc_multiset_data (pos, flags);
+    assign_svalue_no_free (&new->cmp_less, &old->cmp_less);
+    new->ind_types = old->ind_types;
+    new->val_types = old->val_types;
+
+#define SHIFT_BY_POS(OLD, OLDBASE, OLDTYPE, NEWBASE, NEWTYPE)		\
+    ((OLD) ? NODE_AT (NEWBASE, NEWTYPE, 0) + (				\
+      (struct OLDTYPE *) (OLD) - (struct OLDTYPE *) (OLDBASE)->nodes) : 0)
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    struct OTHERTYPE *onode;						\
+    struct TYPE *nnode;							\
+    while (pos-- > 0) {							\
+      onode = NODE_AT (old, OTHERTYPE, pos);				\
+      nnode = NODE_AT (new, TYPE, pos);					\
+      /* Like COPY_NODE_PTRS, but shift by node position. */		\
+      nnode->prev = SHIFT_BY_POS (onode->prev, old, OTHERTYPE, new, TYPE); \
+      nnode->next = SHIFT_BY_POS (onode->next, old, OTHERTYPE, new, TYPE); \
+      switch (onode->ind.type) {					\
+	case T_DELETED:							\
+	  /* Like COPY_DELETED_PTRS_EXTRA, but shift by node position. */ \
+	  nnode->ind.u.ptr =						\
+	    SHIFT_BY_POS (onode->ind.u.ptr, old, OTHERTYPE, new, TYPE);	\
+	  /* FALL THROUGH */						\
+	case PIKE_T_UNKNOWN:						\
+	  nnode->ind.type = onode->ind.type;				\
+	  break;							\
+	default:							\
+	  COPY_NODE_IND (onode, nnode, TYPE);				\
+	  INDVAL (							\
+	    nnode->val.type = T_INT;					\
+	    nnode->val.u.integer = 1;					\
+	  );								\
+      }									\
+    }									\
+    new->free_list = (union msnode *)					\
+      SHIFT_BY_POS (old->free_list, old, OTHERTYPE, new, TYPE);		\
+    new->root = (union msnode *)					\
+      SHIFT_BY_POS (old->root, old, OTHERTYPE, new, TYPE);
+
+    DO_WITH_NODES (new);
+
+#undef WITH_NODES_BLOCK
+
+    new->size = old->size;
+    if (!sub_ref (old)) free_multiset_data (old);
+    add_ref (l->msd = new);
+  }
+}
+
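+/* Changes the compare function of the multiset. If the multiset has
+ * any elements, the tree is rebuilt by reinserting every node under
+ * the new order. The operation restarts from the beginning if the
+ * multiset is changed concurrently, e.g. from the compare function. */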
+PMOD_EXPORT void multiset_set_cmp_less (struct multiset *l,
+					struct svalue *cmp_less)
+{
+  struct multiset_data *old = l->msd;
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (old);
+  if (cmp_less) check_svalue (cmp_less);
+#endif
+
+again:
+  if (cmp_less ?
+      is_identical (cmp_less, &old->cmp_less) : old->cmp_less.type == T_INT)
+    {}
+
+  else if (!old->root) {
+    if (prepare_for_change (l, l->node_refs)) old = l->msd;
+    free_svalue (&old->cmp_less);
+    if (cmp_less) assign_svalue_no_free (&old->cmp_less, cmp_less);
+    else old->cmp_less.type = T_INT;
+  }
+
+  else {
+    struct tree_build_data new;
+    union msnode *next;
+    struct svalue ind;
+    ONERROR uwp;
+
+    new.l = NULL, new.msd2 = NULL;
+    new.msd = copy_multiset_data (old);
+    new.list = low_multiset_first (new.msd);
+    new.node = NULL;
+    new.msd->root = NULL;
+    new.msd->size = 0;
+    SET_ONERROR (uwp, free_tree_build_data, &new);
+
+    free_svalue (&new.msd->cmp_less);
+    if (cmp_less) assign_svalue_no_free (&new.msd->cmp_less, cmp_less);
+    else new.msd->cmp_less.type = T_INT;
+
+    do {
+      low_use_multiset_index (new.list, ind);
+      next = low_multiset_next (new.list);
+
+      /* Note: Similar code in mkmultiset_2. */
+
+      while (1) {
+	RBSTACK_INIT (rbstack);
+
+	if (!new.msd->root) {
+	  low_rb_init_root (HDR (new.msd->root = new.list));
+	  goto node_added;
+	}
+
+	switch (low_multiset_track_le_gt (new.msd, &ind, &rbstack)) {
+	  case FIND_LESS:
+	    low_rb_link_at_next (PHDR (&new.msd->root), rbstack, HDR (new.list));
+	    goto node_added;
+	  case FIND_GREATER:
+	    low_rb_link_at_prev (PHDR (&new.msd->root), rbstack, HDR (new.list));
+	    goto node_added;
+	  case FIND_DESTRUCTED:
+	    midflight_remove_node_faster (new.msd, &rbstack);
+	    continue;
+	  default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+	}
+      }
+
+    node_added:
+      new.msd->size++;
+      if (l->msd != old) {
+	/* l changed. Have to start over to guarantee no loss of data. */
+	CALL_AND_UNSET_ONERROR (uwp);
+	old = l->msd;
+	goto again;
+      }
+    } while ((new.list = next));
+
+    UNSET_ONERROR (uwp);
+    if (!sub_ref (old)) free_multiset_data (old);
+    add_ref (l->msd = new.msd);
+  }
+}
+
+PMOD_EXPORT struct multiset *mkmultiset (struct array *indices)
+{
+  debug_malloc_touch (indices);
+  return mkmultiset_2 (indices, NULL, NULL);
+}
+
+/* values may be NULL to make a multiset with indices only. */
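+/* A minimal caller sketch (hypothetical ind and val arrays of equal
+ * size; the arrays stay owned by the caller):
+ *
+ *   struct multiset *l = mkmultiset_2 (ind, val, NULL);
+ *   ...
+ *   free_multiset (l);
+ */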
+PMOD_EXPORT struct multiset *mkmultiset_2 (struct array *indices,
+					   struct array *values,
+					   struct svalue *cmp_less)
+{
+  struct multiset *l;
+  struct tree_build_data new;
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (indices);
+  debug_malloc_touch (values);
+  if (values && values->size != indices->size)
+    fatal ("Indices and values not of same size (%d vs %d).\n",
+	   indices->size, values->size);
+  if (cmp_less) check_svalue (cmp_less);
+#endif
+
+  new.l = NULL, new.msd2 = NULL;
+  new.msd = low_alloc_multiset_data (ALLOC_SIZE (indices->size),
+				     values ? MULTISET_INDVAL : 0);
+
+  if (cmp_less) assign_svalue_no_free (&new.msd->cmp_less, cmp_less);
+  else new.msd->cmp_less.type = T_INT;
+
+  if (!indices->size)
+    fix_free_list (new.msd, 0);
+  else {
+    int pos;
+    ONERROR uwp;
+
+    new.list = NULL;
+    SET_ONERROR (uwp, free_tree_build_data, &new);
+    new.msd->ind_types = indices->type_field;
+    if (values) new.msd->val_types = values->type_field;
+
+    for (pos = indices->size; --pos >= 0;) {
+      new.node = values ?
+	IVNODE (NODE_AT (new.msd, msnode_indval, pos)) :
+	INODE (NODE_AT (new.msd, msnode_ind, pos));
+      if (values) assign_svalue_no_free (&new.node->iv.val, &ITEM (values)[pos]);
+      assign_svalue_no_free (&new.node->i.ind, &ITEM (indices)[pos]);
+
+      /* Note: Similar code in multiset_set_cmp_less. */
+
+      /* Note: It would perhaps be a bit faster to use quicksort. */
+
+      while (1) {
+	RBSTACK_INIT (rbstack);
+
+	if (!new.msd->root) {
+	  low_rb_init_root (HDR (new.msd->root = new.node));
+	  goto node_added;
+	}
+
+	switch (low_multiset_track_le_gt (new.msd,
+					  &new.node->i.ind, /* Not clobbered yet. */
+					  &rbstack)) {
+	  case FIND_LESS:
+	    low_rb_link_at_next (PHDR (&new.msd->root), rbstack, HDR (new.node));
+	    goto node_added;
+	  case FIND_GREATER:
+	    low_rb_link_at_prev (PHDR (&new.msd->root), rbstack, HDR (new.node));
+	    goto node_added;
+	  case FIND_DESTRUCTED:
+	    midflight_remove_node_faster (new.msd, &rbstack);
+	    continue;
+	  default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+	}
+      }
+
+    node_added:
+#ifdef PIKE_DEBUG
+      new.node->i.ind.type |= MULTISET_FLAG_MARKER;
+#endif
+      new.msd->size++;
+    }
+
+    UNSET_ONERROR (uwp);
+    fix_free_list (new.msd, indices->size);
+  }
+
+  l = alloc_multiset();
+  l->msd = new.msd;
+  add_ref (new.msd);
+  INIT_MULTISET (l);
+  return l;
+}
+
+PMOD_EXPORT int msnode_is_deleted (struct multiset *l, ptrdiff_t nodepos)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  struct svalue ind;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_msnode (l, nodepos, 1);
+
+  node = OFF2MSNODE (msd, nodepos);
+
+  if (IS_DESTRUCTED (low_use_multiset_index (node, ind))) {
+    if (msd->refs == 1) {
+      add_msnode_ref (l);
+      multiset_delete_node (l, nodepos);
+    }
+    return 1;
+  }
+
+  return node->i.ind.type == T_DELETED;
+}
+
+union msnode *low_multiset_find_eq (struct multiset *l, struct svalue *key)
+{
+  struct multiset_data *msd = l->msd;
+  struct rb_node_hdr *node;
+  ONERROR uwp;
+
+  /* Note: Similar code in low_multiset_track_eq. */
+
+  add_ref (msd);
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    if ((node = HDR (msd->root))) {
+      if (msd->cmp_less.type == T_INT) {
+	struct svalue tmp;
+	LOW_RB_FIND (
+	  node,
+	  {
+	    low_use_multiset_index (RBNODE (node), tmp);
+	    if (IS_DESTRUCTED (&tmp)) goto index_destructed;
+	    INTERNAL_CMP (key, &tmp, cmp_res);
+	  },
+	  node = NULL, ;, node = NULL);
+      }
+
+      else {
+	/* Find the biggest node less or order-wise equal to key. */
+	LOW_RB_FIND_NEQ (
+	  node,
+	  {
+	    push_svalue (key);
+	    low_push_multiset_index (RBNODE (node));
+	    if (IS_DESTRUCTED (sp - 1)) {pop_n_elems (2); goto index_destructed;}
+	    EXTERNAL_CMP (&msd->cmp_less);
+	    cmp_res = IS_ZERO (sp - 1) ? 1 : -1;
+	    pop_stack();
+	  },
+	  {},			/* Got less or equal. */
+	  {node = node->prev;}); /* Got greater - step back one. */
+
+	/* Step backwards until a less or really equal node is found. */
+	for (; node; node = rb_prev (node)) {
+	  low_push_multiset_index (RBNODE (node));
+	  if (IS_DESTRUCTED (sp - 1)) {pop_stack(); goto index_destructed;}
+	  if (is_eq (sp - 1, key)) {pop_stack(); break;}
+	  push_svalue (key);
+	  EXTERNAL_CMP (&msd->cmp_less);
+	  if (!IS_ZERO (sp - 1)) {pop_stack(); node = NULL; break;}
+	  pop_stack();
+	}
+      }
+    }
+
+    if (l->msd == msd) break;
+    /* Will always go into the first if clause below. */
+
+  index_destructed:
+    if (l->msd != msd)
+      MOVE_MSD_REF_AND_FREE (l, msd);
+    else
+      midflight_remove_node (l, &msd, RBNODE (node));
+  }
+
+  UNSET_ONERROR (uwp);
+  sub_extra_ref (msd);
+  return RBNODE (node);
+}
+
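+/* Returns the offset of a node with an index equal to key, or -1 if
+ * there is none. On success a node reference is added; release it
+ * with sub_msnode_ref when the offset is no longer needed. */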
+PMOD_EXPORT ptrdiff_t multiset_find_eq (struct multiset *l, struct svalue *key)
+{
+  union msnode *node = low_multiset_find_eq (l, key);
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  check_svalue (key);
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (l->msd, node);
+  }
+  return -1;
+}
+
+static enum find_types low_multiset_find_le_gt (
+  struct multiset_data *msd, struct svalue *key, union msnode **found)
+{
+  struct rb_node_hdr *node = HDR (msd->root);
+
+  /* Note: Similar code in low_multiset_track_le_gt. */
+
+#ifdef PIKE_DEBUG
+  /* Allow zero refs too since that's used during initial building. */
+  if (msd->refs == 1) fatal ("Copy-on-write assumed here.\n");
+#endif
+
+  if ((node = HDR (msd->root))) {
+    if (msd->cmp_less.type == T_INT) {
+      struct svalue tmp;
+      LOW_RB_FIND_NEQ (
+	node,
+	{
+	  low_use_multiset_index (RBNODE (node), tmp);
+	  if (IS_DESTRUCTED (&tmp)) {
+	    *found = RBNODE (node);
+	    return FIND_DESTRUCTED;
+	  }
+	  /* FIXME: Use special variant of set_svalue_cmpfun so we
+	   * don't have to copy the index svalues. */
+	  INTERNAL_CMP (key, &tmp, cmp_res);
+	  cmp_res = cmp_res >= 0 ? 1 : -1;
+	},
+	{*found = RBNODE (node); return FIND_LESS;},
+	{*found = RBNODE (node); return FIND_GREATER;});
+    }
+
+    else {
+      LOW_RB_FIND_NEQ (
+	node,
+	{
+	  push_svalue (key);
+	  low_push_multiset_index (RBNODE (node));
+	  if (IS_DESTRUCTED (sp - 1)) {
+	    pop_n_elems (2);
+	    *found = RBNODE (node);
+	    return FIND_DESTRUCTED;
+	  }
+	  EXTERNAL_CMP (&msd->cmp_less);
+	  cmp_res = IS_ZERO (sp - 1) ? 1 : -1;
+	  pop_stack();
+	},
+	{*found = RBNODE (node); return FIND_LESS;},
+	{*found = RBNODE (node); return FIND_GREATER;});
+    }
+  }
+
+  *found = NULL;
+  return FIND_NOROOT;
+}
+
+static enum find_types low_multiset_find_lt_ge (
+  struct multiset_data *msd, struct svalue *key, union msnode **found)
+{
+  struct rb_node_hdr *node = HDR (msd->root);
+
+#ifdef PIKE_DEBUG
+  /* Allow zero refs too since that's used during initial building. */
+  if (msd->refs == 1) fatal ("Copy-on-write assumed here.\n");
+#endif
+
+  if ((node = HDR (msd->root))) {
+    if (msd->cmp_less.type == T_INT) {
+      struct svalue tmp;
+      LOW_RB_FIND_NEQ (
+	node,
+	{
+	  low_use_multiset_index (RBNODE (node), tmp);
+	  if (IS_DESTRUCTED (&tmp)) {
+	    *found = RBNODE (node);
+	    return FIND_DESTRUCTED;
+	  }
+	  /* FIXME: Use special variant of set_svalue_cmpfun so we
+	   * don't have to copy the index svalues. */
+	  INTERNAL_CMP (key, &tmp, cmp_res);
+	  cmp_res = cmp_res <= 0 ? -1 : 1;
+	},
+	{*found = RBNODE (node); return FIND_LESS;},
+	{*found = RBNODE (node); return FIND_GREATER;});
+    }
+
+    else {
+      LOW_RB_FIND_NEQ (
+	node,
+	{
+	  low_push_multiset_index (RBNODE (node));
+	  if (IS_DESTRUCTED (sp - 1)) {
+	    pop_stack();
+	    *found = RBNODE (node);
+	    return FIND_DESTRUCTED;
+	  }
+	  push_svalue (key);
+	  EXTERNAL_CMP (&msd->cmp_less);
+	  cmp_res = IS_ZERO (sp - 1) ? -1 : 1;
+	  pop_stack();
+	},
+	{*found = RBNODE (node); return FIND_LESS;},
+	{*found = RBNODE (node); return FIND_GREATER;});
+    }
+  }
+
+  *found = NULL;
+  return FIND_NOROOT;
+}
+
+PMOD_EXPORT ptrdiff_t multiset_find_lt (struct multiset *l, struct svalue *key)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  ONERROR uwp;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (key);
+
+  add_ref (msd);
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    enum find_types find_type = low_multiset_find_lt_ge (msd, key, &node);
+    if (l->msd != msd)		/* Multiset changed; try again. */
+      MOVE_MSD_REF_AND_FREE (l, msd);
+    else
+      switch (find_type) {
+	case FIND_LESS:
+	case FIND_NOROOT:
+	  goto done;
+	case FIND_GREATER:	/* Got greater or equal - step back one. */
+	  node = INODE (node->i.prev);
+	  goto done;
+	case FIND_DESTRUCTED:
+	  midflight_remove_node (l, &msd, node);
+	  break;
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+done:
+  UNSET_ONERROR (uwp);
+  sub_extra_ref (msd);
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (msd, node);
+  }
+  else return -1;
+}
+
+PMOD_EXPORT ptrdiff_t multiset_find_ge (struct multiset *l, struct svalue *key)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  ONERROR uwp;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (key);
+
+  add_ref (msd);
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    enum find_types find_type = low_multiset_find_lt_ge (msd, key, &node);
+    if (l->msd != msd)		/* Multiset changed; try again. */
+      MOVE_MSD_REF_AND_FREE (l, msd);
+    else
+      switch (find_type) {
+	case FIND_LESS:		/* Got less - step forward one. */
+	  node = INODE (node->i.next);
+	  goto done;
+	case FIND_NOROOT:
+	case FIND_GREATER:
+	  goto done;
+	case FIND_DESTRUCTED:
+	  midflight_remove_node (l, &msd, node);
+	  break;
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+done:
+  UNSET_ONERROR (uwp);
+  sub_extra_ref (msd);
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (msd, node);
+  }
+  else return -1;
+}
+
+PMOD_EXPORT ptrdiff_t multiset_find_le (struct multiset *l, struct svalue *key)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  ONERROR uwp;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (key);
+
+  add_ref (msd);
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    enum find_types find_type = low_multiset_find_le_gt (msd, key, &node);
+    if (l->msd != msd)		/* Multiset changed; try again. */
+      MOVE_MSD_REF_AND_FREE (l, msd);
+    else
+      switch (find_type) {
+	case FIND_LESS:
+	case FIND_NOROOT:
+	  goto done;
+	case FIND_GREATER:	/* Got greater - step back one. */
+	  node = INODE (node->i.prev);
+	  goto done;
+	case FIND_DESTRUCTED:
+	  midflight_remove_node (l, &msd, node);
+	  break;
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+done:
+  UNSET_ONERROR (uwp);
+  sub_extra_ref (msd);
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (msd, node);
+  }
+  else return -1;
+}
+
+PMOD_EXPORT ptrdiff_t multiset_find_gt (struct multiset *l, struct svalue *key)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  ONERROR uwp;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (key);
+
+  add_ref (msd);
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    enum find_types find_type = low_multiset_find_le_gt (msd, key, &node);
+    if (l->msd != msd)		/* Multiset changed; try again. */
+      MOVE_MSD_REF_AND_FREE (l, msd);
+    else
+      switch (find_type) {
+	case FIND_LESS:		/* Got less or equal - step forward one. */
+	  node = INODE (node->i.next);
+	  goto done;
+	case FIND_NOROOT:
+	case FIND_GREATER:
+	  goto done;
+	case FIND_DESTRUCTED:
+	  midflight_remove_node (l, &msd, node);
+	  break;
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+done:
+  UNSET_ONERROR (uwp);
+  sub_extra_ref (msd);
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (msd, node);
+  }
+  else return -1;
+}
+
+PMOD_EXPORT ptrdiff_t multiset_first (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  struct svalue ind;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+
+  node = low_multiset_first (msd);
+  while (node && IS_DESTRUCTED (low_use_multiset_index (node, ind)))
+    if (msd->refs == 1) {
+      add_msnode_ref (l);
+      multiset_delete_node (l, MSNODE2OFF (msd, node));
+      msd = l->msd;
+      node = low_multiset_first (msd);
+    }
+    else
+      node = low_multiset_next (node);
+
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (msd, node);
+  }
+  else return -1;
+}
+
+PMOD_EXPORT ptrdiff_t multiset_last (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  struct svalue ind;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+
+  node = low_multiset_last (msd);
+  while (node && IS_DESTRUCTED (low_use_multiset_index (node, ind)))
+    if (msd->refs == 1) {
+      add_msnode_ref (l);
+      multiset_delete_node (l, MSNODE2OFF (msd, node));
+      msd = l->msd;
+      node = low_multiset_last (msd);
+    }
+    else
+      node = low_multiset_prev (node);
+
+  if (node) {
+    add_msnode_ref (l);
+    return MSNODE2OFF (msd, node);
+  }
+  else return -1;
+}
+
+/* Returns -1 if there's no predecessor. If the node is deleted, the
+ * predecessor of the closest following nondeleted node is returned.
+ * If there is no following nondeleted node, the last node is
+ * returned. */
+PMOD_EXPORT ptrdiff_t multiset_prev (struct multiset *l, ptrdiff_t nodepos)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  struct svalue ind;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_msnode (l, nodepos, 1);
+
+  node = OFF2MSNODE (msd, nodepos);
+
+  if (node->i.ind.type == T_DELETED)
+    do {
+      node = DELETED_NEXT (node);
+      if (!node) {
+	node = low_multiset_last (msd);
+	return node ? MSNODE2OFF (msd, node) : -1;
+      }
+    } while (node->i.ind.type == T_DELETED);
+
+  node = low_multiset_prev (node);
+
+  while (node && IS_DESTRUCTED (low_use_multiset_index (node, ind))) {
+    union msnode *prev = low_multiset_prev (node);
+    if (msd->refs == 1) {
+      nodepos = prev ? MSNODE2OFF (msd, prev) : -1;
+      add_msnode_ref (l);
+      multiset_delete_node (l, MSNODE2OFF (msd, node));
+      msd = l->msd;
+      node = nodepos >= 0 ? OFF2MSNODE (msd, nodepos) : NULL;
+    }
+    else
+      node = prev;
+  }
+
+  return node ? MSNODE2OFF (msd, node) : -1;
+}
+
+/* Returns -1 if there's no successor. If the node is deleted, the
+ * successor of the closest preceding nondeleted node is returned. If
+ * there is no preceding nondeleted node, the first node is
+ * returned. */
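+/* A minimal iteration sketch (hypothetical caller code, assuming
+ * nothing throws while iterating):
+ *
+ *   ptrdiff_t pos = multiset_first (l);
+ *   if (pos >= 0) {
+ *     do {
+ *       ... look at the entry at pos ...
+ *     } while ((pos = multiset_next (l, pos)) >= 0);
+ *     sub_msnode_ref (l);    (drop the ref added by multiset_first)
+ *   }
+ */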
+PMOD_EXPORT ptrdiff_t multiset_next (struct multiset *l, ptrdiff_t nodepos)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  struct svalue ind;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_msnode (l, nodepos, 1);
+
+  node = OFF2MSNODE (msd, nodepos);
+
+  if (node->i.ind.type == T_DELETED)
+    do {
+      node = DELETED_PREV (node);
+      if (!node) {
+	node = low_multiset_first (msd);
+	return node ? MSNODE2OFF (msd, node) : -1;
+      }
+    } while (node->i.ind.type == T_DELETED);
+
+  node = low_multiset_next (node);
+
+  while (node && IS_DESTRUCTED (low_use_multiset_index (node, ind))) {
+    union msnode *next = low_multiset_next (node);
+    if (msd->refs == 1) {
+      nodepos = next ? MSNODE2OFF (msd, next) : -1;
+      add_msnode_ref (l);
+      multiset_delete_node (l, MSNODE2OFF (msd, node));
+      msd = l->msd;
+      node = nodepos >= 0 ? OFF2MSNODE (msd, nodepos) : NULL;
+    }
+    else
+      node = next;
+  }
+
+  return node ? MSNODE2OFF (msd, node) : -1;
+}
+
+static enum find_types low_multiset_track_eq (
+  struct multiset_data *msd, struct svalue *key, struct rbstack_ptr *track)
+{
+  struct rb_node_hdr *node = HDR (msd->root);
+  struct rbstack_ptr rbstack = *track;
+
+  /* Note: Similar code in low_multiset_find_eq. */
+
+#ifdef PIKE_DEBUG
+  /* Allow zero refs too since that's used during initial building. */
+  if (msd->refs == 1) fatal ("Copy-on-write assumed here.\n");
+#endif
+
+  if (msd->cmp_less.type == T_INT) {
+    struct svalue tmp;
+    LOW_RB_TRACK (
+      rbstack, node,
+      {
+	low_use_multiset_index (RBNODE (node), tmp);
+	if (IS_DESTRUCTED (&tmp)) {
+	  *track = rbstack;
+	  return FIND_DESTRUCTED;
+	}
+	/* FIXME: Use special variant of set_svalue_cmpfun so we don't
+	 * have to copy the index svalues. */
+	INTERNAL_CMP (key, &tmp, cmp_res);
+      },
+      {*track = rbstack; return FIND_LESS;},
+      {*track = rbstack; return FIND_EQUAL;},
+      {*track = rbstack; return FIND_GREATER;});
+  }
+
+  else {
+    /* Find the biggest node less or order-wise equal to key. */
+    enum find_types find_type;
+    struct rb_node_hdr *found_node;
+    int step_count;
+
+    LOW_RB_TRACK_NEQ (
+      rbstack, node,
+      {
+	push_svalue (key);
+	low_push_multiset_index (RBNODE (node));
+	if (IS_DESTRUCTED (sp - 1)) {
+	  pop_n_elems (2);
+	  *track = rbstack;
+	  return FIND_DESTRUCTED;
+	}
+	EXTERNAL_CMP (&msd->cmp_less);
+	cmp_res = IS_ZERO (sp - 1) ? 1 : -1;
+	pop_stack();
+      }, {
+	find_type = FIND_LESS;
+	found_node = node;
+	step_count = 0;
+      }, {
+	find_type = FIND_GREATER;
+	found_node = node;
+	node = node->prev;
+	step_count = 1;
+      });
+
+    /* Step backwards until a less or really equal node is found. */
+    while (1) {
+      if (!node) {*track = rbstack; return find_type;}
+      low_push_multiset_index (RBNODE (node));
+      if (IS_DESTRUCTED (sp - 1)) {pop_stack(); find_type = FIND_DESTRUCTED; break;}
+      if (is_eq (sp - 1, key)) {pop_stack(); find_type = FIND_EQUAL; break;}
+      push_svalue (key);
+      EXTERNAL_CMP (&msd->cmp_less);
+      if (!IS_ZERO (sp - 1)) {pop_stack(); *track = rbstack; return find_type;}
+      pop_stack();
+      node = rb_prev (node);
+      step_count++;
+    }
+
+    /* A node was found during stepping. Adjust rbstack. */
+    while (step_count--) LOW_RB_TRACK_PREV (rbstack, found_node);
+#ifdef PIKE_DEBUG
+    if (node != RBSTACK_PEEK (rbstack)) fatal ("Stack stepping failed.\n");
+#endif
+
+    *track = rbstack;
+    return find_type;
+  }
+}
+
+static enum find_types low_multiset_track_le_gt (
+  struct multiset_data *msd, struct svalue *key, struct rbstack_ptr *track)
+{
+  struct rb_node_hdr *node = HDR (msd->root);
+  struct rbstack_ptr rbstack = *track;
+
+  /* Note: Similar code in low_multiset_find_le_gt. */
+
+#ifdef PIKE_DEBUG
+  /* Allow zero refs too since that's used during initial building. */
+  if (msd->refs == 1) fatal ("Copy-on-write assumed here.\n");
+#endif
+
+  if (msd->cmp_less.type == T_INT) {
+    struct svalue tmp;
+    LOW_RB_TRACK_NEQ (
+      rbstack, node,
+      {
+	low_use_multiset_index (RBNODE (node), tmp);
+	if (IS_DESTRUCTED (&tmp)) {
+	  *track = rbstack;
+	  return FIND_DESTRUCTED;
+	}
+	/* FIXME: Use special variant of set_svalue_cmpfun so we don't
+	 * have to copy the index svalues. */
+	INTERNAL_CMP (key, &tmp, cmp_res);
+	cmp_res = cmp_res >= 0 ? 1 : -1;
+      },
+      {*track = rbstack; return FIND_LESS;},
+      {*track = rbstack; return FIND_GREATER;});
+  }
+
+  else {
+    LOW_RB_TRACK_NEQ (
+      rbstack, node,
+      {
+	push_svalue (key);
+	low_push_multiset_index (RBNODE (node));
+	if (IS_DESTRUCTED (sp - 1)) {
+	  pop_n_elems (2);
+	  *track = rbstack;
+	  return FIND_DESTRUCTED;
+	}
+	EXTERNAL_CMP (&msd->cmp_less);
+	cmp_res = IS_ZERO (sp - 1) ? 1 : -1;
+	pop_stack();
+      },
+      {*track = rbstack; return FIND_LESS;},
+      {*track = rbstack; return FIND_GREATER;});
+  }
+}
+
+void multiset_fix_type_field (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  TYPE_FIELD ind_types = 0, val_types = 0;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+
+  if ((node = low_multiset_first (msd))) {
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    IND (val_types = BIT_INT);						\
+    do {								\
+      ind_types |= 1 << (node->i.ind.type & ~MULTISET_FLAG_MASK);	\
+      INDVAL (val_types |= 1 << node->iv.val.type);			\
+    } while ((node = low_multiset_next (node)));
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+  }
+  else
+    if (!(msd->flags & MULTISET_INDVAL)) val_types = BIT_INT;
+
+#ifdef PIKE_DEBUG
+  if (ind_types & ~msd->ind_types)
+    fatal ("Multiset indices type field lacked 0x%x.\n", ind_types & ~msd->ind_types);
+  if (val_types & ~msd->val_types)
+    fatal ("Multiset values type field lacked 0x%x.\n", val_types & ~msd->val_types);
+#endif
+
+  msd->ind_types = ind_types;
+  msd->val_types = val_types;
+}
+
+#ifdef PIKE_DEBUG
+static void check_multiset_type_fields (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  TYPE_FIELD ind_types = 0, val_types = 0;
+
+  if ((node = low_multiset_first (msd))) {
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    IND (val_types = BIT_INT);						\
+    do {								\
+      ind_types |= 1 << (node->i.ind.type & ~MULTISET_FLAG_MASK);	\
+      INDVAL (val_types |= 1 << node->iv.val.type);			\
+    } while ((node = low_multiset_next (node)));
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+  }
+  else
+    if (!(msd->flags & MULTISET_INDVAL)) val_types = BIT_INT;
+
+  if (ind_types & ~msd->ind_types)
+    fatal ("Multiset indices type field lacked 0x%x.\n", ind_types & ~msd->ind_types);
+  if (val_types & ~msd->val_types)
+    fatal ("Multiset values type field lacked 0x%x.\n", val_types & ~msd->val_types);
+}
+#endif
+
+PMOD_EXPORT void multiset_insert (struct multiset *l,
+				  struct svalue *ind)
+{
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  dmalloc_touch_svalue (ind);
+  multiset_insert_2 (l, ind, NULL, 1);
+}
+
+#define ADD_NODE(MSD, RBSTACK, NEW, IND, VAL, FIND_TYPE) do {		\
+    assign_svalue_no_free (&NEW->i.ind, IND);				\
+    MSD->ind_types |= 1 << IND->type;					\
+    DO_IF_DEBUG (NEW->i.ind.type |= MULTISET_FLAG_MARKER);		\
+    if (MSD->flags & MULTISET_INDVAL) {					\
+      if (VAL) {							\
+	assign_svalue_no_free (&NEW->iv.val, VAL);			\
+	MSD->val_types |= 1 << VAL->type;				\
+      }									\
+      else {								\
+	NEW->iv.val.type = T_INT;					\
+	NEW->iv.val.u.integer = 1;					\
+	MSD->val_types |= BIT_INT;					\
+      }									\
+    }									\
+    switch (FIND_TYPE) {						\
+      case FIND_LESS:							\
+	low_rb_link_at_next (PHDR (&MSD->root), RBSTACK, HDR (NEW));	\
+	break;								\
+      case FIND_GREATER:						\
+	low_rb_link_at_prev (PHDR (&MSD->root), RBSTACK, HDR (NEW));	\
+	break;								\
+      case FIND_NOROOT:							\
+	MSD->root = NEW;						\
+	low_rb_init_root (HDR (NEW));					\
+	break;								\
+      default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));		\
+    }									\
+  } while (0)
+
+/* val may be zero. If the multiset has values, the integer 1 will be
+ * used as value in that case. val is ignored if the multiset has no
+ * values. The value of an existing entry will be replaced iff replace
+ * is nonzero (done under the assumption the caller has one value
+ * lock), otherwise nothing will be done in that case. */
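+/* Caller sketch (hypothetical svalues ind and val):
+ *
+ *   multiset_insert_2 (l, &ind, &val, 1);   insert or replace the value
+ *   multiset_insert_2 (l, &ind, &val, 0);   insert only if ind is missing
+ */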
+PMOD_EXPORT ptrdiff_t multiset_insert_2 (struct multiset *l,
+					 struct svalue *ind,
+					 struct svalue *val,
+					 int replace)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *new;
+  enum find_types find_type;
+  RBSTACK_INIT (rbstack);
+  ONERROR uwp;
+
+  /* Note: Similar code in multiset_add, multiset_add_after,
+   * multiset_delete_2 and multiset_delete_node. */
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (ind);
+  if (val) check_svalue (val);
+#endif
+
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    if (!msd->root) {
+      if (prepare_for_add (l, l->node_refs)) msd = l->msd;
+      ALLOC_MSNODE (msd, l->node_refs, new);
+      find_type = FIND_NOROOT;
+      goto insert;
+    }
+
+    if (!msd->free_list && !l->node_refs && msd->refs == 1) {
+      /* Enlarge now if possible, anticipating there will be an
+       * insert. Otherwise we'd either have to redo the search or do
+       * the resize without rebalancing. */
+#ifdef PIKE_DEBUG
+      if (d_flag > 1) check_multiset (l);
+#endif
+      l->msd = resize_multiset_data (msd, ENLARGE_SIZE (msd->allocsize), 0);
+      msd = l->msd;
+    }
+#if 0
+    else
+      if (msd->size == msd->allocsize)
+	fputs ("Can't rebalance multiset tree in multiset_insert_2\n", stderr);
+#endif
+
+    add_ref (msd);
+    find_type = low_multiset_track_eq (msd, ind, &rbstack);
+
+    if (l->msd != msd) {
+      RBSTACK_FREE (rbstack);
+      if (!sub_ref (msd)) free_multiset_data (msd);
+      msd = l->msd;
+    }
+
+    else
+      switch (find_type) {
+	case FIND_LESS:
+	case FIND_GREATER:
+	  sub_extra_ref (msd);
+	  if (prepare_for_add (l, 1)) {
+	    rbstack_shift (rbstack, HDR (msd->nodes), HDR (l->msd->nodes));
+	    msd = l->msd;
+	  }
+	  ALLOC_MSNODE (msd, l->node_refs, new);
+	  goto insert;
+
+	case FIND_EQUAL: {
+	  struct rb_node_hdr *node;
+	  RBSTACK_POP (rbstack, node);
+	  RBSTACK_FREE (rbstack);
+	  UNSET_ONERROR (uwp);
+	  sub_extra_ref (msd);
+	  if (replace && msd->flags & MULTISET_INDVAL) {
+	    if (prepare_for_value_change (l, 1)) {
+	      node = SHIFT_HDRPTR (node, msd, l->msd);
+	      msd = l->msd;
+	    }
+	    if (val) {
+	      assign_svalue (&RBNODE (node)->iv.val, val);
+	      msd->val_types |= 1 << val->type;
+	    }
+	    else {
+	      free_svalue (&RBNODE (node)->iv.val);
+	      RBNODE (node)->iv.val.type = T_INT;
+	      RBNODE (node)->iv.val.u.integer = 1;
+	      msd->val_types |= BIT_INT;
+	    }
+	  }
+	  return MSNODE2OFF (msd, RBNODE (node));
+	}
+
+	case FIND_DESTRUCTED:
+	  sub_extra_ref (msd);
+	  midflight_remove_node_fast (l, &rbstack, 0);
+	  msd = l->msd;
+	  break;
+
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+insert:
+  UNSET_ONERROR (uwp);
+  ADD_NODE (msd, rbstack, new, ind, val, find_type);
+  return MSNODE2OFF (msd, new);
+}
+
+/* val may be zero. If the multiset has values, the integer 1 will be
+ * used as value then. val is ignored if the multiset has no
+ * values. */
+PMOD_EXPORT ptrdiff_t multiset_add (struct multiset *l,
+				    struct svalue *ind,
+				    struct svalue *val)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *new;
+  enum find_types find_type;
+  RBSTACK_INIT (rbstack);
+  ONERROR uwp;
+
+  /* Note: Similar code in multiset_insert_2, multiset_add_after,
+   * multiset_delete_2 and multiset_delete_node. */
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (ind);
+  if (val) check_svalue (val);
+#endif
+
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    if (!msd->root) {
+      if (prepare_for_add (l, l->node_refs)) msd = l->msd;
+      ALLOC_MSNODE (msd, l->node_refs, new);
+      find_type = FIND_NOROOT;
+      goto add;
+    }
+
+    if (!msd->free_list && !l->node_refs) {
+      /* Enlarge now if possible. Otherwise we'd either have to redo
+       * the search or do the resize without rebalancing. */
+      if (msd->refs > 1) {
+	l->msd = copy_multiset_data (msd);
+	MOVE_MSD_REF (l, msd);
+      }
+#ifdef PIKE_DEBUG
+      if (d_flag > 1) check_multiset (l);
+#endif
+      l->msd = resize_multiset_data (msd, ENLARGE_SIZE (msd->allocsize), 0);
+      msd = l->msd;
+    }
+#if 0
+    else
+      if (msd->size == msd->allocsize)
+	fputs ("Can't rebalance multiset tree in multiset_add\n", stderr);
+#endif
+
+    add_ref (msd);
+    find_type = low_multiset_track_le_gt (msd, ind, &rbstack);
+
+    if (l->msd != msd) {
+      RBSTACK_FREE (rbstack);
+      if (!sub_ref (msd)) free_multiset_data (msd);
+      msd = l->msd;
+    }
+
+    else
+      switch (find_type) {
+	case FIND_LESS:
+	case FIND_GREATER:
+	  sub_extra_ref (msd);
+	  if (prepare_for_add (l, 1)) {
+	    rbstack_shift (rbstack, HDR (msd->nodes), HDR (l->msd->nodes));
+	    msd = l->msd;
+	  }
+	  ALLOC_MSNODE (msd, l->node_refs, new);
+	  goto add;
+
+	case FIND_DESTRUCTED:
+	  sub_extra_ref (msd);
+	  midflight_remove_node_fast (l, &rbstack, 0);
+	  msd = l->msd;
+	  break;
+
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+add:
+  UNSET_ONERROR (uwp);
+  ADD_NODE (msd, rbstack, new, ind, val, find_type);
+  return MSNODE2OFF (msd, new);
+}
+
+#define TEST_LESS(MSD, A, B, CMP_RES) do {				\
+    if (MSD->cmp_less.type == T_INT)					\
+      INTERNAL_CMP (A, B, CMP_RES);					\
+    else {								\
+      push_svalue (A);							\
+      push_svalue (B);							\
+      EXTERNAL_CMP (&MSD->cmp_less);					\
+      CMP_RES = IS_ZERO (sp - 1) ? 1 : -1;				\
+      pop_stack();							\
+    }									\
+  } while (0)
+
+/* val may be zero. If the multiset has values, the integer 1 will be
+ * used as value then. val is ignored if the multiset has no values.
+ * The new entry is added first if nodepos < 0.
+ *
+ * -1 is returned if the entry couldn't be added after the specified
+ * node because that would break the order. This is always checked,
+ * since it might occur due to concurrent changes of the multiset.
+ *
+ * -2 is returned if the given node is (or becomes) deleted.
+ *
+ * Otherwise the offset of the new node is returned (as usual). */
+PMOD_EXPORT ptrdiff_t multiset_add_after (struct multiset *l,
+					  ptrdiff_t nodepos,
+					  struct svalue *ind,
+					  struct svalue *val)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *new;
+  struct rb_node_hdr *node;
+  enum find_types find_type;
+  int cmp_res;
+  struct svalue tmp;
+  RBSTACK_INIT (rbstack);
+  ONERROR uwp;
+
+  /* Note: Similar code in multiset_insert_2, multiset_add,
+   * multiset_delete_2 and multiset_delete_node. */
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (ind);
+  if (val) check_svalue (val);
+  if (nodepos >= 0) check_msnode (l, nodepos, 1);
+#endif
+
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    if (!(node = HDR (msd->root))) {
+      if (prepare_for_add (l, l->node_refs)) msd = l->msd;
+      ALLOC_MSNODE (msd, l->node_refs, new);
+      find_type = FIND_NOROOT;
+      goto add;
+    }
+
+    if (nodepos < 0) {
+      ONERROR uwp2;
+
+    add_node_first:
+      add_ref (msd);
+      add_msnode_ref (l);
+      SET_ONERROR (uwp2, do_sub_msnode_ref, l);
+
+      LOW_RB_TRACK_FIRST (rbstack, node);
+      low_use_multiset_index (RBNODE (node), tmp);
+      TEST_LESS (msd, &tmp, ind, cmp_res);
+
+      if (l->msd != msd) {
+	/* The multiset changed. Must redo the compare unless the
+	 * same node still is the first one. */
+	node = SHIFT_HDRPTR (node, msd, l->msd);
+	if (node != HDR (low_multiset_first (l->msd))) {
+	  RBSTACK_FREE (rbstack);
+	  continue;
+	}
+	rbstack_shift (rbstack, HDR (msd->nodes), HDR (l->msd->nodes));
+	MOVE_MSD_REF_AND_FREE (l, msd);
+      }
+
+      UNSET_ONERROR (uwp2);
+      sub_msnode_ref (l);
+      assert (l->msd == msd);
+      sub_extra_ref (msd);
+      if (cmp_res < 0) {RBSTACK_FREE (rbstack); UNSET_ONERROR (uwp); return -1;}
+
+      if (prepare_for_add (l, 1)) {
+	rbstack_shift (rbstack, HDR (msd->nodes), HDR (l->msd->nodes));
+	msd = l->msd;
+      }
+      ALLOC_MSNODE (msd, l->node_refs, new);
+      find_type = FIND_GREATER;
+      goto add;
+    }
+
+    else {
+      int cmp_res;
+      union msnode *existing = OFF2MSNODE (msd, nodepos);
+
+      while (existing->i.ind.type == T_DELETED) {
+	existing = DELETED_PREV (existing);
+	if (!existing) goto add_node_first;
+      }
+
+      add_ref (msd);
+
+      {				/* Compare against the following node. */
+	union msnode *next = low_multiset_next (existing);
+	if (next) {
+	  low_use_multiset_index (next, tmp);
+	  TEST_LESS (msd, &tmp, ind, cmp_res);
+	  if (l->msd != msd) {
+	    if (!sub_ref (msd)) free_multiset_data (msd);
+	    msd = l->msd;
+	    continue;
+	  }
+	  if (cmp_res < 0) {
+	    UNSET_ONERROR (uwp);
+	    sub_extra_ref (msd);
+	    return -1;
+	  }
+	}
+      }
+
+      find_type = low_multiset_track_le_gt (msd, ind, &rbstack);
+
+      if (l->msd != msd) goto multiset_changed;
+
+      if (find_type == FIND_DESTRUCTED) {
+	sub_extra_ref (msd);
+	midflight_remove_node_fast (l, &rbstack, 0);
+	msd = l->msd;
+	continue;
+      }
+
+      /* Step backwards until the existing node is found, or until
+       * we're outside the range of compare-wise equal nodes. */
+      node = RBSTACK_PEEK (rbstack);
+      cmp_res = 0;
+      while (RBNODE (node) != existing) {
+	low_use_multiset_index (RBNODE (node), tmp);
+	TEST_LESS (msd, &tmp, ind, cmp_res);
+	if (cmp_res < 0) break;
+	LOW_RB_TRACK_PREV (rbstack, node);
+	if (!node) {cmp_res = -1; break;}
+      }
+
+      if (l->msd != msd) goto multiset_changed;
+      sub_extra_ref (msd);
+
+      if (cmp_res < 0) {RBSTACK_FREE (rbstack); UNSET_ONERROR (uwp); return -1;}
+
+      if (prepare_for_add (l, 1)) {
+	rbstack_shift (rbstack, HDR (msd->nodes), HDR (l->msd->nodes));
+	node = SHIFT_HDRPTR (node, msd, l->msd);
+	msd = l->msd;
+      }
+      ALLOC_MSNODE (msd, l->node_refs, new);
+
+      /* Find a node to link on to. */
+      if (node->flags & RB_THREAD_NEXT)
+	find_type = FIND_LESS;
+      else {
+	node = node->next;
+	RBSTACK_PUSH (rbstack, node);
+	while (!(node->flags & RB_THREAD_PREV)) {
+	  node = node->prev;
+	  RBSTACK_PUSH (rbstack, node);
+	}
+	find_type = FIND_GREATER;
+      }
+      goto add;
+
+    multiset_changed:
+      RBSTACK_FREE (rbstack);
+      if (!sub_ref (msd)) free_multiset_data (msd);
+      msd = l->msd;
+    }
+  }
+
+add:
+  UNSET_ONERROR (uwp);
+  ADD_NODE (msd, rbstack, new, ind, val, find_type);
+  return MSNODE2OFF (msd, new);
+}
+
+PMOD_EXPORT int multiset_delete (struct multiset *l,
+				 struct svalue *ind)
+{
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  dmalloc_touch_svalue (ind);
+  return multiset_delete_2 (l, ind, NULL);
+}
+
+/* If removed_val isn't NULL, the value of the deleted node is stored
+ * there, or the integer 1 if the multiset lacks values. The undefined
+ * value is stored if no matching entry was found. */
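+/* Caller sketch (hypothetical svalue ind): delete an entry and take
+ * over its value.
+ *
+ *   struct svalue val;
+ *   if (multiset_delete_2 (l, &ind, &val)) {
+ *     ... val holds the removed value (or the integer 1) ...
+ *     free_svalue (&val);
+ *   }
+ */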
+PMOD_EXPORT int multiset_delete_2 (struct multiset *l,
+				   struct svalue *ind,
+				   struct svalue *removed_val)
+{
+  struct multiset_data *msd = l->msd;
+  enum find_types find_type;
+  RBSTACK_INIT (rbstack);
+  ONERROR uwp;
+
+  /* Note: Similar code in multiset_insert_2, multiset_add,
+   * multiset_add_after and multiset_delete_node. */
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_svalue (ind);
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    if (!msd->root) goto not_found;
+
+    add_ref (msd);
+    find_type = low_multiset_track_eq (msd, ind, &rbstack);
+
+    if (l->msd != msd) {
+      RBSTACK_FREE (rbstack);
+      if (!sub_ref (msd)) free_multiset_data (msd);
+      msd = l->msd;
+    }
+
+    else
+      switch (find_type) {
+	case FIND_LESS:
+	case FIND_GREATER:
+	  RBSTACK_FREE (rbstack);
+	  sub_extra_ref (msd);
+	  goto not_found;
+
+	case FIND_EQUAL: {
+	  struct svalue ind, val;
+	  struct rb_node_hdr *node = RBSTACK_PEEK (rbstack);
+	  int indval = msd->flags & MULTISET_INDVAL;
+
+	  UNSET_ONERROR (uwp);
+	  sub_extra_ref (msd);
+
+	  /* Postpone free since the msd might be copied in unlink_node. */
+	  low_use_multiset_index (RBNODE (node), ind);
+	  if (indval) val = RBNODE (node)->iv.val;
+
+	  unlink_msnode (l, &rbstack, 0);
+
+	  free_svalue (&ind);
+	  if (removed_val)
+	    if (indval) {
+	      dmalloc_touch_svalue (&val);
+	      *removed_val = val;
+	      dmalloc_touch_svalue (removed_val);
+	    }
+	    else {
+	      removed_val->type = T_INT;
+	      removed_val->u.integer = 1;
+	    }
+	  else
+	    if (indval) free_svalue (&val);
+
+	  return 1;
+	}
+
+	case FIND_DESTRUCTED:
+	  sub_extra_ref (msd);
+	  midflight_remove_node_fast (l, &rbstack, 0);
+	  msd = l->msd;
+	  break;
+
+	default: DO_IF_DEBUG (fatal ("Invalid find_type.\n"));
+      }
+  }
+
+not_found:
+  UNSET_ONERROR (uwp);
+  if (removed_val) {
+    removed_val->type = T_INT;
+    removed_val->subtype = NUMBER_UNDEFINED;
+    removed_val->u.integer = 0;
+  }
+  return 0;
+}
+
+/* Frees the node reference that nodepos represents. */
+PMOD_EXPORT void multiset_delete_node (struct multiset *l,
+				       ptrdiff_t nodepos)
+{
+  struct multiset_data *msd = l->msd;
+  enum find_types find_type;
+  RBSTACK_INIT (rbstack);
+  ONERROR uwp;
+
+  /* Note: Similar code in multiset_insert_2, multiset_add,
+   * multiset_add_after and multiset_delete_2. */
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  check_msnode (l, nodepos, 1);
+
+  SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+
+  while (1) {
+    union msnode *existing = OFF2MSNODE (msd, nodepos);
+    struct svalue ind;
+
+    if (existing->i.ind.type == T_DELETED) {
+      UNSET_ONERROR (uwp);
+      sub_msnode_ref (l);
+      return;
+    }
+    low_use_multiset_index (existing, ind);
+
+    add_ref (msd);
+    find_type = low_multiset_track_le_gt (msd, &ind, &rbstack);
+
+    if (l->msd != msd) {
+      RBSTACK_FREE (rbstack);
+      if (!sub_ref (msd)) free_multiset_data (msd);
+      msd = l->msd;
+    }
+
+    else if (find_type == FIND_DESTRUCTED) {
+      sub_extra_ref (msd);
+      midflight_remove_node_fast (l, &rbstack, 0);
+      msd = l->msd;
+    }
+
+    else {
+      struct svalue val;
+      struct rb_node_hdr *node = RBSTACK_PEEK (rbstack);
+      int indval = msd->flags & MULTISET_INDVAL;
+
+      /* Step backwards until the existing node is found. */
+      while (RBNODE (node) != existing) LOW_RB_TRACK_PREV (rbstack, node);
+
+      UNSET_ONERROR (uwp);
+
+      /* Postpone free since the msd might be copied in unlink_node. */
+      if (indval) val = RBNODE (node)->iv.val;
+
+      sub_msnode_ref (l);
+      assert (l->msd == msd);
+      sub_extra_ref (msd);
+      unlink_msnode (l, &rbstack, 0);
+
+      free_svalue (&ind);
+      if (indval) free_svalue (&val);
+
+      return;
+    }
+  }
+}
+
+PMOD_EXPORT int multiset_member (struct multiset *l, struct svalue *key)
+{
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  dmalloc_touch_svalue (key);
+  return low_multiset_find_eq (l, key) ? 1 : 0;
+}
+
+/* No ref is added for the returned svalue. */
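+/* Usage sketch (hypothetical key): the returned pointer refers into
+ * the multiset data block (or to svalue_int_one), so copy it, e.g.
+ * with assign_svalue_no_free, if it's needed past the next change of
+ * the multiset.
+ *
+ *   struct svalue *val = multiset_lookup (l, &key);
+ *   if (val) { ... }
+ */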
+PMOD_EXPORT struct svalue *multiset_lookup (struct multiset *l,
+					    struct svalue *key)
+{
+  union msnode *node;
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  check_svalue (key);
+  if ((node = low_multiset_find_eq (l, key)))
+    if (l->msd->flags & MULTISET_INDVAL) return &node->iv.val;
+    else return &svalue_int_one;
+  else
+    return NULL;
+}
+
+struct array *multiset_indices (struct multiset *l)
+{
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  return multiset_range_indices (l, -1, -1);
+}
+
+struct array *multiset_values (struct multiset *l)
+{
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  return multiset_range_values (l, -1, -1);
+}
+
+#define GET_RANGE_SIZE_AND_END(BEGPOS, ENDPOS, MSD, MSD_SIZE, RANGE_SIZE, END) do { \
+    if (BEGPOS < 0 && ENDPOS < 0) {					\
+      RANGE_SIZE = MSD_SIZE;						\
+      END = low_multiset_last (MSD);					\
+    }									\
+									\
+    else {								\
+      union msnode *beg, *node;						\
+									\
+      if (BEGPOS < 0)							\
+	beg = NULL;							\
+      else {								\
+	beg = OFF2MSNODE (MSD, BEGPOS);					\
+	if (beg->i.ind.type == T_DELETED) {				\
+	  do {								\
+	    beg = DELETED_PREV (beg);					\
+	  } while (beg && beg->i.ind.type == T_DELETED);		\
+	  if (beg) beg = low_multiset_next (beg);			\
+	}								\
+      }									\
+									\
+      if (ENDPOS < 0) {							\
+	END = low_multiset_last (MSD);					\
+	RANGE_SIZE = 1;							\
+      }									\
+      else {								\
+	END = OFF2MSNODE (MSD, ENDPOS);					\
+	if (END->i.ind.type == T_DELETED) {				\
+	  do {								\
+	    END = DELETED_NEXT (END);					\
+	  } while (END && END->i.ind.type == T_DELETED);		\
+	  if (END) END = low_multiset_prev (END);			\
+	  else END = low_multiset_last (MSD);				\
+	}								\
+	RANGE_SIZE = beg ? 1 : 0;					\
+      }									\
+									\
+      for (node = END; node != beg; node = low_multiset_prev (node)) {	\
+	if (!node) {							\
+	  RANGE_SIZE = 0;						\
+	  break;							\
+	}								\
+	RANGE_SIZE++;							\
+      }									\
+    }									\
+  } while (0)
+
+/* The range is inclusive. begpos and/or endpos may be -1 to go to the
+ * limit in that direction. If begpos points to a deleted node then
+ * the next nondeleted node is used instead, which is found in the
+ * same way as multiset_next. Vice versa for endpos. If the
+ * beginning is after the end then the empty array is returned. */
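+/* Caller sketch: collect the indices from the node at nodepos (a node
+ * offset obtained from e.g. multiset_find_ge) up to the last entry.
+ *
+ *   struct array *a = multiset_range_indices (l, nodepos, -1);
+ *   ...
+ *   free_array (a);
+ */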
+struct array *multiset_range_indices (struct multiset *l,
+				      ptrdiff_t begpos, ptrdiff_t endpos)
+{
+  struct multiset_data *msd;
+  struct array *indices;
+  union msnode *end;
+  int msd_size, range_size;
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  if (begpos >= 0) check_msnode (l, begpos, 1);
+  if (endpos >= 0) check_msnode (l, endpos, 1);
+#endif
+
+  check_multiset_for_destruct (l);
+  msd = l->msd;
+  msd_size = multiset_sizeof (l);
+
+  GET_RANGE_SIZE_AND_END (begpos, endpos, msd, msd_size,
+			  range_size, end);
+
+  if (range_size) {
+    TYPE_FIELD types;
+    indices = allocate_array_no_init (1, range_size);
+    indices->size = range_size;
+    if (range_size == msd_size) {
+      types = msd->ind_types;
+      while (1) {
+	low_assign_multiset_index_no_free (&ITEM (indices)[--range_size], end);
+	if (!range_size) break;
+	end = low_multiset_prev (end);
+      }
+    }
+    else {
+      types = 0;
+      while (1) {
+	low_assign_multiset_index_no_free (&ITEM (indices)[--range_size], end);
+	types |= 1 << ITEM (indices)[range_size].type;
+	if (!range_size) break;
+	end = low_multiset_prev (end);
+      }
+    }
+    indices->type_field = types;
+  }
+  else add_ref (indices = &empty_array);
+
+  array_check_type_field (indices);
+  return indices;
+}
+
+/* The range is inclusive. begpos and/or endpos may be -1 to go to the
+ * limit in that direction. If begpos points to a deleted node then
+ * the next nondeleted node is used instead, which is found in the
+ * same way as multiset_next. Vice versa for endpos. If the
+ * beginning is after the end then the empty array is returned. */
+struct array *multiset_range_values (struct multiset *l,
+				     ptrdiff_t begpos, ptrdiff_t endpos)
+{
+  struct multiset_data *msd;
+  struct array *values;
+  union msnode *beg, *end;
+  int msd_size, range_size;
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+  if (begpos >= 0) check_msnode (l, begpos, 1);
+  if (endpos >= 0) check_msnode (l, endpos, 1);
+#endif
+
+  check_multiset_for_destruct (l);
+  msd = l->msd;
+  msd_size = multiset_sizeof (l);
+
+  GET_RANGE_SIZE_AND_END (begpos, endpos, msd, msd_size,
+			  range_size, end);
+
+  if (range_size) {
+    values = allocate_array_no_init (1, range_size);
+    values->size = range_size;
+    if (l->msd->flags & MULTISET_INDVAL) {
+      TYPE_FIELD types;
+      if (range_size == msd_size) {
+	types = msd->val_types;
+	while (1) {
+	  assign_svalue_no_free (&ITEM (values)[--range_size], &end->iv.val);
+	  if (!range_size) break;
+	  end = low_multiset_prev (end);
+	}
+      }
+      else {
+	types = 0;
+	while (1) {
+	  assign_svalue_no_free (&ITEM (values)[--range_size], &end->iv.val);
+	  types |= 1 << ITEM (values)[range_size].type;
+	  if (!range_size) break;
+	  end = low_multiset_prev (end);
+	}
+      }
+      values->type_field = types;
+    }
+    else {
+      do {
+	ITEM (values)[--range_size].type = T_INT;
+	ITEM (values)[range_size].subtype = NUMBER_NUMBER;
+	ITEM (values)[range_size].u.integer = 1;
+      } while (range_size);
+      values->type_field = BIT_INT;
+    }
+  }
+  else add_ref (values = &empty_array);
+
+  return values;
+}
+
+/* Eliminates all pointers to destructed objects. If an index is such
+ * a pointer then the node is removed. */
+PMOD_EXPORT void check_multiset_for_destruct (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+
+#ifdef PIKE_DEBUG
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  if (Pike_in_gc > GC_PASS_PREPARE && Pike_in_gc < GC_PASS_FREE)
+    fatal("check_multiset_for_destruct called in invalid pass inside gc.\n");
+#endif
+
+  if (msd->root &&
+      ((msd->ind_types | msd->val_types) & (BIT_OBJECT | BIT_FUNCTION))) {
+    struct rb_node_hdr *node = HDR (msd->root);
+    struct svalue ind;
+    TYPE_FIELD ind_types = 0, val_types = 0;
+    RBSTACK_INIT (rbstack);
+    LOW_RB_TRACK_FIRST (rbstack, node);
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    IND (val_types = BIT_INT);						\
+    do {								\
+      low_use_multiset_index (RBNODE (node), ind);			\
+      if (IS_DESTRUCTED (&ind)) {					\
+	midflight_remove_node_fast (l, &rbstack, 1);			\
+	msd = l->msd;							\
+	node = RBSTACK_PEEK (rbstack);					\
+      }									\
+      else {								\
+	ind_types |= 1 << ind.type;					\
+	INDVAL (							\
+	  if (IS_DESTRUCTED (&RBNODE (node)->iv.val)) {			\
+	    check_destructed (&RBNODE (node)->iv.val);			\
+	    val_types |= BIT_INT;					\
+	  }								\
+	  else val_types |= 1 << RBNODE (node)->iv.val.type;		\
+	);								\
+	LOW_RB_TRACK_NEXT (rbstack, node);				\
+      }									\
+    } while (node);
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+
+#ifdef PIKE_DEBUG
+    if (ind_types & ~msd->ind_types)
+      fatal ("Multiset indices type field lacked 0x%x.\n", ind_types & ~msd->ind_types);
+    if (val_types & ~msd->val_types)
+      fatal ("Multiset values type field lacked 0x%x.\n", val_types & ~msd->val_types);
+#endif
+
+    msd->ind_types = ind_types;
+    msd->val_types = val_types;
+  }
+}
+
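+/* Returns a new multiset that shares the data block with the old one;
+ * actual copying is deferred to the copy-on-write machinery
+ * (prepare_for_change et al) when either of them is modified later. */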
+PMOD_EXPORT struct multiset *copy_multiset (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+  l = alloc_multiset();
+  INIT_MULTISET (l);
+  add_ref (l->msd = msd);
+  return l;
+}
+
+/* Returns NULL if no special case is applicable. */
+static struct multiset *merge_special (struct multiset *a,
+				       struct multiset *b,
+				       int operation)
+{
+  ONERROR uwp;
+  int op;
+
+  debug_malloc_touch (a);
+  debug_malloc_touch (a->msd);
+  debug_malloc_touch (b);
+  debug_malloc_touch (b->msd);
+
+#define COPY_MSD_AND_KEEP_FLAGS(FROM, TO) do {				\
+    struct multiset_data *oldmsd = (TO)->msd;				\
+    SET_ONERROR (uwp, free_indirect_multiset_data, &oldmsd);		\
+    add_ref ((TO)->msd = (FROM)->msd);					\
+    multiset_set_flags ((TO), oldmsd->flags);				\
+    multiset_set_cmp_less ((TO), &oldmsd->cmp_less);			\
+    UNSET_ONERROR (uwp);						\
+    if (!sub_ref (oldmsd)) free_multiset_data (oldmsd);			\
+  } while (0)
+
+#define EMPTY_MSD_AND_KEEP_FLAGS(L) do {				\
+    struct multiset_data *oldmsd = (L)->msd;				\
+    add_ref ((L)->msd = low_alloc_multiset_data (0, oldmsd->flags));	\
+    assign_svalue_no_free (&(L)->msd->cmp_less, &oldmsd->cmp_less);	\
+    if (!sub_ref (oldmsd)) free_multiset_data (oldmsd);			\
+  } while (0)
+
+#define ALLOC_COPY_AND_SET_FLAGS(FROM, RES, FLAGSRC) do {		\
+    (RES) = copy_multiset (FROM);					\
+    SET_ONERROR (uwp, do_free_multiset, (RES));				\
+    multiset_set_flags ((RES), (FLAGSRC)->msd->flags);			\
+    multiset_set_cmp_less ((RES), &(FLAGSRC)->msd->cmp_less);		\
+    UNSET_ONERROR (uwp);						\
+  } while (0)
+
+#define ALLOC_EMPTY_AND_SET_FLAGS(RES, FLAGSRC) do {			\
+    (RES) = allocate_multiset (0, (FLAGSRC)->msd->flags,		\
+			       &(FLAGSRC)->msd->cmp_less);		\
+  } while (0)
+
+  if (!a->msd->root)
+    if (operation & PIKE_ARRAY_OP_B) /* Result is b. */
+      if (operation & PIKE_MERGE_DESTR_A) {
+	if (a->node_refs) return NULL;
+	COPY_MSD_AND_KEEP_FLAGS (b, a);
+	return a;
+      }
+      else {
+	struct multiset *res;
+	ALLOC_COPY_AND_SET_FLAGS (b, res, a);
+	return res;
+      }
+    else			/* Result is empty. */
+      if (operation & PIKE_MERGE_DESTR_A)
+	return a;
+      else {
+	struct multiset *res;
+	ALLOC_EMPTY_AND_SET_FLAGS (res, a);
+	return res;
+      }
+
+  else if (!b->msd->root)
+    if (operation & (PIKE_ARRAY_OP_A << 8)) /* Result is a. */
+      if (operation & PIKE_MERGE_DESTR_A)
+	return a;
+      else {
+	struct multiset *res;
+	ALLOC_COPY_AND_SET_FLAGS (a, res, a);
+	return res;
+      }
+    else			/* Result is empty. */
+      if (operation & PIKE_MERGE_DESTR_A) {
+	if (a->node_refs) return NULL;
+	EMPTY_MSD_AND_KEEP_FLAGS (a);
+	return a;
+      }
+      else {
+	struct multiset *res;
+	ALLOC_EMPTY_AND_SET_FLAGS (res, a);
+	return res;
+      }
+
+  else if (a == b) {
+    op = operation & ((PIKE_ARRAY_OP_A|PIKE_ARRAY_OP_B) << 4);
+    if (op) {
+      if (op != ((PIKE_ARRAY_OP_A|PIKE_ARRAY_OP_B) << 4)) { /* Result is a (or b). */
+	if (operation & PIKE_MERGE_DESTR_A)
+	  return a;
+	else {
+	  struct multiset *res;
+	  ALLOC_COPY_AND_SET_FLAGS (a, res, a);
+	  return res;
+	}
+      }
+    }
+    else			/* Result is empty. */
+      if (operation & PIKE_MERGE_DESTR_A) {
+	if (a->node_refs) return NULL;
+	EMPTY_MSD_AND_KEEP_FLAGS (a);
+	return a;
+      }
+      else {
+	struct multiset *res;
+	ALLOC_EMPTY_AND_SET_FLAGS (res, a);
+	return res;
+      }
+  }
+
+  return NULL;
+}
+
+struct merge_data
+{
+  struct multiset *a, *b, *res, *tmp;
+  struct recovery_data rd;
+  struct rb_node_hdr *a_node, *b_node, *res_list;
+  size_t res_length;
+};
+
+static void cleanup_merge_data (struct merge_data *m)
+{
+  debug_malloc_touch (m->a);
+  debug_malloc_touch (m->a ? m->a->msd : NULL);
+  debug_malloc_touch (m->b);
+  debug_malloc_touch (m->b ? m->b->msd : NULL);
+  debug_malloc_touch (m->res);
+  debug_malloc_touch (m->res ? m->res->msd : NULL);
+  debug_malloc_touch (m->tmp);
+  debug_malloc_touch (m->tmp ? m->tmp->msd : NULL);
+
+  if (m->res_list) {
+    /* The result msd contains a list and possibly a part of a tree.
+     * Knit it together to a tree again. Knowledge that LOW_RB_MERGE
+     * traverses the trees backwards is used here. */
+    struct rb_node_hdr *list = m->res_list;
+    size_t length = m->res_length;
+    if (m->res == m->a) {
+      struct rb_node_hdr *node;
+      for (node = m->a_node; node; node = rb_prev (node)) {
+	node->next = list, list = node;
+	length++;
+      }
+    }
+    m->res->msd->root = RBNODE (rb_make_tree (list, length));
+  }
+
+  sub_msnode_ref (m->res);
+  if (m->res != m->a) free_multiset (m->res);
+  if (m->tmp) free_multiset (m->tmp);
+  free_recovery_data (&m->rd);
+}
+
+static void merge_shift_ptrs (struct multiset_data *old, struct multiset_data *new,
+			      struct merge_data *m)
+{
+  if (m->a == m->res && m->a_node) m->a_node = SHIFT_HDRPTR (m->a_node, old, new);
+  if (m->res_list) m->res_list = SHIFT_HDRPTR (m->res_list, old, new);
+}
+
+/* If PIKE_MERGE_DESTR_A is used, a is returned without ref increase.
+ * Else the new multiset will inherit flags and cmp_less from a.
+ *
+ * If destructive on an operand and there is an exception, then some
+ * random part(s) of the operand will be left unprocessed. All entries
+ * that were in the operand and would remain in the finished result
+ * will still be there, and no entries that wouldn't be in the
+ * finished result will have been added from the other operand. */
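+/* A minimal caller sketch (assuming the PIKE_ARRAY_OP_* operation
+ * constants from array.h): nondestructive intersection of a and b.
+ *
+ *   struct multiset *res = merge_multisets (a, b, PIKE_ARRAY_OP_AND);
+ *   ...
+ *   free_multiset (res);
+ */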
+PMOD_EXPORT struct multiset *merge_multisets (struct multiset *a,
+					      struct multiset *b,
+					      int operation)
+{
+  struct merge_data m;
+  int got_node_refs, indval;
+  TYPE_FIELD ind_types, val_types;
+  ONERROR uwp;
+
+  debug_malloc_touch (a);
+  debug_malloc_touch (a->msd);
+  debug_malloc_touch (b);
+  debug_malloc_touch (b->msd);
+
+#ifdef PIKE_DEBUG
+  if (operation & PIKE_MERGE_DESTR_B)
+    fatal ("Destructiveness on second operand not supported.\n");
+#endif
+
+#if 1
+  if (!a->msd->root || !b->msd->root || a == b) {
+    struct multiset *res = merge_special (a, b, operation);
+    if (res) return res;
+  }
+#endif
+
+  m.tmp = NULL;
+  m.res_list = NULL;
+  m.res_length = 0;
+
+  /* Preparations. Set m.res and make sure the operands have the same
+   * form. This can do up to three multiset copies that could be
+   * optimized away, but that'd lead to quite a bit of extra code and
+   * those situations are so unusual it's not worth bothering
+   * about. */
+  if (operation & PIKE_MERGE_DESTR_A) {
+#ifdef PIKE_DEBUG
+    if (a->refs != 1)
+      fatal ("Not safe to do destructive merge with several refs to the operand.\n");
+#endif
+    m.res = m.a = a;
+    if (a == b)
+      /* Can't handle the result being the same as both a and b. */
+      m.b = m.tmp = copy_multiset (b);
+    else m.b = b;
+    /* Can't handle a shared data block even though there might be no
+     * change in it, since the merge always relinks the tree. */
+    prepare_for_change (m.res, got_node_refs = m.res->node_refs);
+  }
+  else {
+    int newsize;
+    if (operation & (PIKE_ARRAY_OP_A << 8)) newsize = multiset_sizeof (a);
+    else newsize = 0;
+    if (operation & PIKE_ARRAY_OP_B) newsize += multiset_sizeof (b);
+    m.res = allocate_multiset (newsize, a->msd->flags, &a->msd->cmp_less);
+    m.a = a, m.b = b;
+    got_node_refs = 0;
+  }
+  if (!SAME_CMP_LESS (a->msd, b->msd)) {
+    if (!m.tmp) m.b = m.tmp = copy_multiset (b);
+    multiset_set_cmp_less (m.b, &a->msd->cmp_less);
+  }
+  if ((a->msd->flags & MULTISET_INDVAL) != (b->msd->flags & MULTISET_INDVAL)) {
+    if (!m.tmp) m.b = m.tmp = copy_multiset (b);
+    multiset_set_flags (m.b, a->msd->flags);
+  }
+
+  indval = m.res->msd->flags & MULTISET_INDVAL;
+  ind_types = val_types = 0;
+  if (m.res == a) m.rd.a_msd = NULL;
+  else add_ref (m.rd.a_msd = m.a->msd);
+  add_ref (m.rd.b_msd = m.b->msd);
+  add_msnode_ref (m.res);
+  SET_ONERROR (uwp, cleanup_merge_data, &m);
+
+#define ALLOC_RES_NODE(RES, RES_MSD, NEW_NODE)				\
+  do {									\
+    union msnode *node;							\
+    if (prepare_for_add (RES, 1)) {					\
+      merge_shift_ptrs (RES_MSD, (RES)->msd, &m);			\
+      (RES_MSD) = (RES)->msd;						\
+    }									\
+    ALLOC_MSNODE (RES_MSD, (RES)->node_refs, node);			\
+    NEW_NODE = HDR (node);						\
+  } while (0)
+
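+/* UNLINK_RES_NODE returns a node in the result to the free list. If
+ * the multiset has node refs, the node is kept as a T_DELETED
+ * placeholder so that external node references stay valid; otherwise
+ * it's marked free and the size is decreased. */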
+#define UNLINK_RES_NODE(RES_MSD, RES_LIST, GOT_NODE_REFS, NODE)		\
+  do {									\
+    ADD_TO_FREE_LIST (RES_MSD, RBNODE (NODE));				\
+    if (GOT_NODE_REFS) {						\
+      RBNODE (NODE)->i.ind.type = T_DELETED;				\
+      /* Knowledge that LOW_RB_MERGE traverses the trees backwards */	\
+      /* is used here. */						\
+      RBNODE (NODE)->i.ind.u.ptr = RES_LIST;				\
+      RBNODE (NODE)->i.prev =						\
+	(struct msnode_ind *) low_multiset_prev (RBNODE (NODE));	\
+    }									\
+    else {								\
+      RBNODE (NODE)->i.ind.type = PIKE_T_UNKNOWN;			\
+      RBNODE (NODE)->i.prev = NULL;					\
+      RES_MSD->size--;							\
+    }									\
+  } while (0)
+
+  if (m.res->msd->cmp_less.type == T_INT) {
+    struct multiset_data *res_msd = m.res->msd;
+    struct svalue a_ind, b_ind;
+    m.a_node = HDR (m.a->msd->root), m.b_node = HDR (m.rd.b_msd->root);
+
+    if (m.rd.a_msd)		/* Not destructive on a. */
+      LOW_RB_MERGE (
+	ic_nd, m.a_node, m.b_node,
+	m.res_list, m.res_length, operation,
+
+	{
+	  low_use_multiset_index (RBNODE (m.a_node), a_ind);
+	  if (IS_DESTRUCTED (&a_ind)) goto ic_nd_free_a;
+	}, {
+	  low_use_multiset_index (RBNODE (m.b_node), b_ind);
+	  if (IS_DESTRUCTED (&b_ind)) goto ic_nd_free_b;
+	},
+
+	INTERNAL_CMP (&a_ind, &b_ind, cmp_res);,
+
+	{			/* Copy m.a_node. */
+	  ALLOC_RES_NODE (m.res, res_msd, new_node);
+	  assign_svalue_no_free (&RBNODE (new_node)->i.ind, &a_ind);
+	  ind_types |= 1 << a_ind.type;
+	  DO_IF_DEBUG (RBNODE (new_node)->i.ind.type |= MULTISET_FLAG_MARKER);
+	  if (indval) {
+	    assign_svalue_no_free (&RBNODE (new_node)->iv.val,
+				   &RBNODE (m.a_node)->iv.val);
+	    val_types |= 1 << RBNODE (m.a_node)->iv.val.type;
+	  }
+	}, {			/* Free m.a_node. */
+	},
+
+	{			/* Copy m.b_node. */
+	  ALLOC_RES_NODE (m.res, res_msd, new_node);
+	  assign_svalue_no_free (&RBNODE (new_node)->i.ind, &b_ind);
+	  ind_types |= 1 << b_ind.type;
+	  DO_IF_DEBUG (RBNODE (new_node)->i.ind.type |= MULTISET_FLAG_MARKER);
+	  if (indval) {
+	    assign_svalue_no_free (&RBNODE (new_node)->iv.val,
+				   &RBNODE (m.b_node)->iv.val);
+	    val_types |= 1 << RBNODE (m.b_node)->iv.val.type;
+	  }
+	}, {			/* Free m.b_node. */
+	});
+
+    else			/* Destructive on a. */
+      LOW_RB_MERGE (
+	ic_da, m.a_node, m.b_node,
+	m.res_list, m.res_length, operation,
+
+	{
+	  low_use_multiset_index (RBNODE (m.a_node), a_ind);
+	  if (IS_DESTRUCTED (&a_ind)) goto ic_da_free_a;
+	}, {
+	  low_use_multiset_index (RBNODE (m.b_node), b_ind);
+	  if (IS_DESTRUCTED (&b_ind)) goto ic_da_free_b;
+	},
+
+	INTERNAL_CMP (&a_ind, &b_ind, cmp_res);,
+
+	{			/* Copy m.a_node. */
+	  new_node = m.a_node;
+	  ind_types |= 1 << a_ind.type;
+	  if (indval) val_types |= 1 << RBNODE (m.a_node)->iv.val.type;
+	}, {			/* Free m.a_node. */
+	  free_svalue (&a_ind);
+	  if (indval) free_svalue (&RBNODE (m.a_node)->iv.val);
+	  UNLINK_RES_NODE (res_msd, m.res_list, got_node_refs, m.a_node);
+	},
+
+	{			/* Copy m.b_node. */
+	  ALLOC_RES_NODE (m.res, res_msd, new_node);
+	  assign_svalue_no_free (&RBNODE (new_node)->i.ind, &b_ind);
+	  ind_types |= 1 << b_ind.type;
+	  DO_IF_DEBUG (RBNODE (new_node)->i.ind.type |= MULTISET_FLAG_MARKER);
+	  if (indval) {
+	    assign_svalue_no_free (&RBNODE (new_node)->iv.val,
+				   &RBNODE (m.b_node)->iv.val);
+	    val_types |= 1 << RBNODE (m.b_node)->iv.val.type;
+	  }
+	}, {			/* Free m.b_node. */
+	});
+  }
+
+  else {
+    struct svalue *cmp_less = &m.res->msd->cmp_less;
+    struct multiset_data *res_msd = m.res->msd;
+    struct svalue a_ind, b_ind;
+
+    fatal ("FIXME: Merge of multisets with external sort function "
+	   "not yet implemented.\n");
+
+    LOW_RB_MERGE (
+      ec, m.a_node, m.b_node,
+      m.res_list, m.res_length, operation,
+
+      {
+	low_use_multiset_index (RBNODE (m.a_node), a_ind);
+	if (IS_DESTRUCTED (&a_ind)) goto ec_free_a;
+      }, {
+	low_use_multiset_index (RBNODE (m.b_node), b_ind);
+	if (IS_DESTRUCTED (&b_ind)) goto ec_free_b;
+      },
+
+      {
+	push_svalue (&a_ind);
+	push_svalue (&b_ind);
+	EXTERNAL_CMP (cmp_less);
+	cmp_res = IS_ZERO (sp - 1) ? 0 : -1;
+	pop_stack();
+	if (!cmp_res) {
+	  push_svalue (&b_ind);
+	  push_svalue (&a_ind);
+	  EXTERNAL_CMP (cmp_less);
+	  cmp_res = IS_ZERO (sp - 1) ? 0 : 1;
+	  pop_stack();
+	  if (!cmp_res) {
+	    /* The values are orderwise equal. Have to check if there
+	     * is an orderwise equal sequence in either operand, since
+	     * we must do an array-like merge between them in that
+	     * case. Knowledge that LOW_RB_MERGE traverses the trees
+	     * backwards is used here. */
+	    /* FIXME */
+	  }
+	}
+      },
+
+      {				/* Copy m.a_node. */
+	if (m.rd.a_msd) {
+	  ALLOC_RES_NODE (m.res, res_msd, new_node);
+	  assign_svalue_no_free (&RBNODE (new_node)->i.ind, &a_ind);
+	  ind_types |= 1 << a_ind.type;
+	  DO_IF_DEBUG (RBNODE (new_node)->i.ind.type |= MULTISET_FLAG_MARKER);
+	  if (indval) {
+	    assign_svalue_no_free (&RBNODE (new_node)->iv.val,
+				   &RBNODE (m.a_node)->iv.val);
+	    val_types |= 1 << RBNODE (m.a_node)->iv.val.type;
+	  }
+	}
+	else
+	  new_node = m.a_node;
+      }, {			/* Free m.a_node. */
+	if (m.rd.a_msd) {}
+	else {
+	  free_svalue (&a_ind);
+	  if (indval) free_svalue (&RBNODE (m.a_node)->iv.val);
+	  UNLINK_RES_NODE (res_msd, m.res_list, got_node_refs, m.a_node);
+	}
+      },
+
+      {				/* Copy m.b_node. */
+	ALLOC_RES_NODE (m.res, res_msd, new_node);
+	assign_svalue_no_free (&RBNODE (new_node)->i.ind, &b_ind);
+	ind_types |= 1 << b_ind.type;
+	DO_IF_DEBUG (RBNODE (new_node)->i.ind.type |= MULTISET_FLAG_MARKER);
+	if (indval) {
+	  assign_svalue_no_free (&RBNODE (new_node)->iv.val,
+				 &RBNODE (m.b_node)->iv.val);
+	  val_types |= 1 << RBNODE (m.b_node)->iv.val.type;
+	}
+      }, {			/* Free m.b_node. */
+      });
+  }
+
+#undef ALLOC_RES_NODE
+#undef UNLINK_RES_NODE
+
+#ifdef PIKE_DEBUG
+  if (operation & PIKE_MERGE_DESTR_A) {
+    if (a->refs != 1)
+      fatal ("First operand grew external refs during destructive merge.\n");
+    if (a->msd->refs > 1)
+      fatal ("Data block of first operand grew external refs "
+	     "during destructive merge.\n");
+  }
+#endif
+
+  UNSET_ONERROR (uwp);
+  m.res->msd->root = RBNODE (rb_make_tree (m.res_list, m.res_length));
+  m.res->msd->ind_types = ind_types;
+  if (indval) m.res->msd->val_types = val_types;
+  if (m.tmp) free_multiset (m.tmp);
+  if (m.rd.a_msd && !sub_ref (m.rd.a_msd)) free_multiset_data (m.rd.a_msd);
+  if (!sub_ref (m.rd.b_msd)) free_multiset_data (m.rd.b_msd);
+#ifdef PIKE_DEBUG
+  if (d_flag > 1) check_multiset (m.res);
+#endif
+  sub_msnode_ref (m.res);	/* Tries to shrink m.res->msd. */
+  return m.res;
+}
+
+/* The result has values iff any argument has values. The order is
+ * taken from the first argument. No weak flags are propagated to
+ * the result. */
+PMOD_EXPORT struct multiset *add_multisets (struct svalue *vect, int count)
+{
+  struct multiset *res, *l;
+  int size = 0, idx, indval = 0;
+  struct svalue *cmp_less = count ? &vect[0].u.multiset->msd->cmp_less : NULL;
+  ONERROR uwp;
+
+  for (idx = 0; idx < count; idx++) {
+    struct multiset *l = vect[idx].u.multiset;
+    debug_malloc_touch (l);
+    debug_malloc_touch (l->msd);
+    size += multiset_sizeof (l);
+    if (!indval && l->msd->flags & MULTISET_INDVAL) indval = 1;
+  }
+
+  if (!size) return allocate_multiset (0, indval && MULTISET_INDVAL, cmp_less);
+
+  for (idx = 0;; idx++) {
+    l = vect[idx].u.multiset;
+    if (l->msd->root) break;
+  }
+
+  if (indval == !!(l->msd->flags & MULTISET_INDVAL) &&
+      (cmp_less ? is_identical (cmp_less, &l->msd->cmp_less) :
+       l->msd->cmp_less.type == T_INT)) {
+    res = copy_multiset (l);
+    multiset_set_flags (res, indval && MULTISET_INDVAL);
+    idx++;
+  }
+  else
+    res = allocate_multiset (size, indval && MULTISET_INDVAL, cmp_less);
+  SET_ONERROR (uwp, do_free_multiset, res);
+
+  for (; idx < count; idx++)
+    /* FIXME: This is inefficient as long as merge_multisets
+     * always is linear. */
+    merge_multisets (res, vect[idx].u.multiset,
+		     PIKE_MERGE_DESTR_A | PIKE_ARRAY_OP_ADD);
+
+  UNSET_ONERROR (uwp);
+  return res;
+}
+
+/* Differences in the weak flags are ignored, but not the order
+ * function and whether there are values or not. The order is always
+ * significant, even in the parts of the multisets where the order
+ * function doesn't define it. */
+PMOD_EXPORT int multiset_equal_p (struct multiset *a, struct multiset *b,
+				  struct processing *p)
+{
+  struct processing curr;
+  struct recovery_data rd;
+  union msnode *a_node, *b_node;
+  struct svalue a_ind, b_ind;
+  int res;
+  ONERROR uwp;
+
+  debug_malloc_touch (a);
+  debug_malloc_touch (a->msd);
+
+  if (a == b) return 1;
+
+  debug_malloc_touch (b);
+  debug_malloc_touch (b->msd);
+
+  check_multiset_for_destruct (a);
+  check_multiset_for_destruct (b);
+
+  rd.a_msd = a->msd, rd.b_msd = b->msd;
+
+  if (multiset_sizeof (a) != multiset_sizeof (b) ||
+      (rd.a_msd->flags & MULTISET_INDVAL) != (rd.b_msd->flags & MULTISET_INDVAL) ||
+      !SAME_CMP_LESS (rd.a_msd, rd.b_msd))
+    return 0;
+
+  if (!rd.a_msd->root && !rd.b_msd->root)
+    return 1;
+
+  curr.pointer_a = (void *) a;
+  curr.pointer_b = (void *) b;
+  curr.next = p;
+
+  for (; p; p = p->next)
+    if (p->pointer_a == (void *) a && p->pointer_b == (void *) b)
+      return 1;
+
+  add_ref (rd.a_msd);
+  add_ref (rd.b_msd);
+  SET_ONERROR (uwp, free_recovery_data, &rd);
+  a_node = low_multiset_first (rd.a_msd);
+  b_node = low_multiset_first (rd.b_msd);
+  res = 1;
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+  do {									\
+    if (INDVAL (							\
+	  !low_is_equal (&a_node->iv.val, &b_node->iv.val, &curr) ||	\
+	)								\
+	!low_is_equal (low_use_multiset_index (a_node, a_ind),		\
+		       low_use_multiset_index (b_node, b_ind),		\
+		       &curr)) {					\
+      res = 0;								\
+      break;								\
+    }									\
+    a_node = low_multiset_next (a_node);				\
+    b_node = low_multiset_next (b_node);				\
+  } while (a_node);
+
+  DO_WITH_NODES (rd.a_msd);
+
+#undef WITH_NODES_BLOCK
+
+  UNSET_ONERROR (uwp);
+  if (!sub_ref (rd.a_msd)) free_multiset_data (rd.a_msd);
+  if (!sub_ref (rd.b_msd)) free_multiset_data (rd.b_msd);
+  return res;
+}
+
+void describe_multiset (struct multiset *l, struct processing *p, int indent)
+{
+  struct processing curr;
+  struct multiset_data *msd;
+  int depth;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+
+  curr.pointer_a = (void *) l;
+  curr.next = p;
+
+  for (depth = 0; p; p = p->next, depth++)
+    if (p->pointer_a == (void *) l) {
+      char buf[20];
+      sprintf (buf, "@%d", depth);
+      my_strcat (buf);
+      return;
+    }
+
+  check_multiset_for_destruct (l);
+  msd = l->msd;
+
+  if (!msd->root)
+    my_strcat ("(< >)");
+  else {
+    union msnode *node;
+    struct svalue ind;
+    INT32 size = multiset_sizeof (l);
+    int notfirst = 0;
+    ONERROR uwp;
+
+    if (size == 1)
+      my_strcat ("(< /* 1 element */\n");
+    else {
+      char buf[40];
+      sprintf (buf, "(< /* %ld elements */\n", (long) size);
+      my_strcat (buf);
+    }
+
+    indent += 2;
+    add_ref (msd);
+    SET_ONERROR (uwp, free_indirect_multiset_data, &msd);
+    node = low_multiset_first (msd);
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    do {								\
+      if (notfirst) my_strcat (",\n");					\
+      else notfirst = 1;						\
+									\
+      for (depth = 2; depth < indent; depth++) my_putchar (' ');	\
+      low_use_multiset_index (node, ind);				\
+      describe_svalue (&ind, indent, &curr);				\
+									\
+      INDVAL (								\
+	my_putchar (':');						\
+	describe_svalue (&node->iv.val, indent, &curr);			\
+      );								\
+    } while ((node = low_multiset_next (node)));
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+
+    my_putchar ('\n');
+    for (depth = 4; depth < indent; depth++) my_putchar (' ');
+    my_strcat (">)");
+
+    UNSET_ONERROR (uwp);
+    if (!sub_ref (msd)) free_multiset_data (msd);
+  }
+}
+
+void simple_describe_multiset (struct multiset *l)
+{
+  char *desc;
+  init_buf();
+  describe_multiset (l, NULL, 2);
+  desc = simple_free_buf();
+  fprintf (stderr, "%s\n", desc);
+  free (desc);
+}
+
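+/* Returns nonzero if all indices and values in the multiset are
+ * constant, i.e. may be put in a program as a constant (cf. the use
+ * in make_node_from_multiset below). */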
+int multiset_is_constant (struct multiset *l, struct processing *p)
+{
+  struct multiset_data *msd = l->msd;
+  int res = 1;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+
+  if (msd->root &&
+      (msd->ind_types | msd->val_types) & ~(BIT_INT|BIT_FLOAT|BIT_STRING)) {
+    union msnode *node = low_multiset_first (msd);
+    struct svalue ind;
+    add_ref (msd);
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+    do {								\
+      if (INDVAL (							\
+	    !svalues_are_constant (&node->iv.val, 1, msd->val_types, p) || \
+	  )								\
+	  !svalues_are_constant (low_use_multiset_index (node, ind),	\
+				 1, msd->ind_types, p)) {		\
+	res = 0;							\
+	break;								\
+      }									\
+    } while ((node = low_multiset_next (node)));
+
+    DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+
+    sub_extra_ref (msd);
+    assert (msd->refs);
+  }
+
+  return res;
+}
+
+node *make_node_from_multiset (struct multiset *l)
+{
+  debug_malloc_touch (l);
+  debug_malloc_touch (l->msd);
+
+  multiset_fix_type_field (l);
+
+  if (multiset_is_constant (l, NULL)) {
+    struct svalue s;
+    if (!l->msd->root) return mkefuncallnode ("aggregate_multiset", NULL);
+    s.type = T_MULTISET;
+    s.subtype = 0;
+    s.u.multiset = l;
+    return mkconstantsvaluenode (&s);
+  }
+
+  else {
+    struct array *ind = multiset_range_indices (l, -1, -1);
+    node *n;
+
+#ifdef PIKE_DEBUG
+    if (l->msd->cmp_less.type != T_INT)
+      fatal ("Didn't expect multiset with custom order function.\n");
+    if (l->msd->flags & MULTISET_WEAK)
+      fatal ("Didn't expect multiset with weak flag(s).\n");
+#endif
+
+    if (l->msd->flags & MULTISET_INDVAL) {
+      struct array *val = multiset_range_values (l, -1, -1);
+      n = mkefuncallnode ("mkmultiset",
+			  mknode (F_ARG_LIST,
+				  make_node_from_array (ind),
+				  make_node_from_array (val)));
+      free_array (val);
+    }
+    else
+      n = mkefuncallnode ("mkmultiset", make_node_from_array (ind));
+
+    free_array (ind);
+    return n;
+  }
+}
+
+/*! @decl multiset aggregate_multiset(mixed ... elems)
+ *!
+ *! Construct a multiset with the arguments as indices. The multiset
+ *! will not contain any values. This method is most useful when
+ *! constructing multisets with @[map] or similar; generally, the
+ *! multiset literal syntax is handier: @code{(<elem1, elem2, ...>)@}.
+ *! With it, it's also possible to construct a multiset with values:
+ *! @code{(<index1: value1, index2: value2, ...>)@}.
+ *!
+ *! @seealso
+ *!   @[sizeof()], @[multisetp()], @[mkmultiset()]
+ */
+PMOD_EXPORT void f_aggregate_multiset (INT32 args)
+{
+  f_aggregate (args);
+  push_multiset (mkmultiset_2 (sp[-1].u.array, NULL, NULL));
+  free_array (sp[-2].u.array);
+  sp[-2] = *--sp;
+}
+
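+/* Copies the multiset and recursively copies all indices and values
+ * in it. The processing list p is used to handle cyclic structures,
+ * like in the other copy_*_recursively functions. */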
+struct multiset *copy_multiset_recursively (struct multiset *l,
+					    struct processing *p)
+{
+  struct processing curr;
+  struct tree_build_data new;
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  int pos;
+  struct svalue ind;
+  TYPE_FIELD ind_types, val_types;
+  ONERROR uwp;
+
+  debug_malloc_touch (l);
+  debug_malloc_touch (msd);
+
+  curr.pointer_a = (void *) l;
+  curr.next = p;
+
+  for (; p; p = p->next)
+    if (p->pointer_a == (void *) l) {
+      add_ref ((struct multiset *) p->pointer_b);
+      return (struct multiset *) p->pointer_b;
+    }
+
+#ifdef PIKE_DEBUG
+  if (d_flag > 1) check_multiset_type_fields (l);
+#endif
+
+  if (!msd->root || !((msd->ind_types | msd->val_types) & BIT_COMPLEX))
+    return copy_multiset (l);
+
+  /* Use a dummy empty msd temporarily in the new multiset, since the
+   * real one is not suitable for general consumption while it's being
+   * built below. This will have the effect that any changes in the
+   * multiset made by other code during the build will change the
+   * dummy msd and will thus be lost afterwards. */
+  new.l = allocate_multiset (0, msd->flags, &msd->cmp_less);
+  new.msd = low_alloc_multiset_data (multiset_sizeof (l), msd->flags);
+  assign_svalue_no_free (&new.msd->cmp_less, &msd->cmp_less);
+  new.node = NULL;
+  pos = 0;
+  ind_types = val_types = 0;
+  curr.pointer_b = (void *) new.l;
+  add_ref (new.msd2 = msd);
+  node = low_multiset_first (msd);
+  SET_ONERROR (uwp, free_tree_build_data, &new);
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+  struct TYPE *new_node = NODE_AT (new.msd, TYPE, 0);			\
+  new.list = (union msnode *) new_node;					\
+  while (1) {								\
+    new_node->next = NULL;						\
+    new_node->ind.type = T_INT;						\
+    INDVAL (new_node->val.type = T_INT);				\
+									\
+    copy_svalues_recursively_no_free (&new_node->ind,			\
+				      low_use_multiset_index (node, ind), \
+				      1, &curr);			\
+    ind_types |= 1 << new_node->ind.type;				\
+    DO_IF_DEBUG (new_node->ind.type |= MULTISET_FLAG_MARKER);		\
+    INDVAL (								\
+      copy_svalues_recursively_no_free (&new_node->val, &node->iv.val,	\
+					1, &curr);			\
+      val_types |= 1 << new_node->val.type;				\
+    );									\
+									\
+    if (!(node = low_multiset_next (node))) break;			\
+    new_node->next = NODE_AT (new.msd, TYPE, ++pos);			\
+    new_node = new_node->next;						\
+  }									\
+  new.msd->ind_types = ind_types;					\
+  INDVAL (new.msd->val_types = val_types);
+
+  DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+
+  UNSET_ONERROR (uwp);
+  if (!sub_ref (msd)) free_multiset_data (msd);
+  assert (!new.msd->refs);
+
+  new.msd->size = ++pos;
+  fix_free_list (new.msd, pos);
+  new.msd->root = RBNODE (rb_make_tree (HDR (new.list), pos));
+  if (!sub_ref (new.l->msd)) free_multiset_data (new.l->msd);
+  add_ref (new.l->msd = new.msd);
+
+  return new.l;
+}
+
+/* Does not handle n being too large. */
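+/* Note that a node ref is added on l; the caller is expected to
+ * release it with sub_msnode_ref when done with the returned node. */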
+PMOD_EXPORT ptrdiff_t multiset_get_nth (struct multiset *l, size_t n)
+{
+  add_msnode_ref (l);
+  return MSNODE2OFF (l->msd, RBNODE (rb_get_nth (HDR (l->msd->root), n)));
+}
+
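+/* Marker flags used by the gc: GC_MSD_GOT_NODE_REFS is set on the
+ * msd marker when some multiset using it has node refs (so deleted
+ * nodes must be kept around), and GC_MSD_VISITED ensures that a
+ * shared msd is only checked once. */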
+#define GC_MSD_GOT_NODE_REFS GC_USER_1
+#define GC_MSD_VISITED GC_USER_2
+
+#ifdef PIKE_DEBUG
+unsigned gc_touch_all_multisets (void)
+{
+  unsigned n = 0;
+  struct multiset *l;
+  if (first_multiset && first_multiset->prev)
+    fatal ("Error in multiset link list.\n");
+  for (l = first_multiset; l; l = l->next) {
+    debug_gc_touch (l);
+    n++;
+    if (l->next && l->next->prev != l)
+      fatal ("Error in multiset link list.\n");
+  }
+  return n;
+}
+#endif
+
+void gc_check_all_multisets (void)
+{
+  struct multiset *l;
+
+  /* Loop twice: First to get the number of internal refs to the msd:s
+   * right, and then again to check the svalues in them correctly.
+   * This is necessary since we need to know if an msd got external
+   * direct refs to avoid checking its svalues as weak. */
+
+  for (l = first_multiset; l; l = l->next) {
+    struct multiset_data *msd = l->msd;
+
+#ifdef DEBUG_MALLOC
+    if (((int) msd) == 0x55555555) {
+      fprintf (stderr, "** Zapped multiset in list of active multisets.\n");
+      describe_something (l, T_MULTISET, 0, 2, 0, NULL);
+      fatal ("Zapped multiset in list of active multisets.\n");
+    }
+#endif
+#ifdef PIKE_DEBUG
+    if (d_flag > 1) check_multiset (l);
+#endif
+
+    debug_gc_check2 (msd, T_MULTISET, l, " as multiset data block of a multiset");
+  }
+
+  for (l = first_multiset; l; l = l->next) {
+    struct multiset_data *msd = l->msd;
+    struct marker *m = get_marker (msd);
+
+    if (!(m->flags & GC_MSD_VISITED)) {
+      if (msd->root) {
+	union msnode *node = low_multiset_first (msd);
+	struct svalue ind;
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+	if (!(msd->flags & MULTISET_WEAK) || m->refs < msd->refs)	\
+	  do {								\
+	    low_use_multiset_index (node, ind);				\
+	    debug_gc_check_svalues (&ind, 1, T_MULTISET, l);		\
+	    INDVAL (debug_gc_check_svalues (				\
+		      &node->iv.val, 1, T_MULTISET, l));		\
+	  } while ((node = low_multiset_next (node)));			\
+									\
+	else {								\
+	  switch (msd->flags & MULTISET_WEAK) {				\
+	    case MULTISET_WEAK_INDICES:					\
+	      do {							\
+		low_use_multiset_index (node, ind);			\
+		debug_gc_check_weak_svalues (&ind, 1, T_MULTISET, l);	\
+		INDVAL (debug_gc_check_svalues (			\
+			  &node->iv.val, 1, T_MULTISET, l));		\
+	      } while ((node = low_multiset_next (node)));		\
+	      break;							\
+									\
+	    case MULTISET_WEAK_VALUES:					\
+	      do {							\
+		low_use_multiset_index (node, ind);			\
+		debug_gc_check_svalues (&ind, 1, T_MULTISET, l);	\
+		INDVAL (debug_gc_check_weak_svalues (			\
+			  &node->iv.val, 1, T_MULTISET, l));		\
+	      } while ((node = low_multiset_next (node)));		\
+	      break;							\
+									\
+	    default:							\
+	      do {							\
+		low_use_multiset_index (node, ind);			\
+		debug_gc_check_weak_svalues (&ind, 1, T_MULTISET, l);	\
+		INDVAL (debug_gc_check_weak_svalues (			\
+			  &node->iv.val, 1, T_MULTISET, l));		\
+	      } while ((node = low_multiset_next (node)));		\
+	      break;							\
+	  }								\
+	  gc_checked_as_weak (msd);					\
+	}
+
+	DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+      }
+
+      if (l->node_refs) m->flags |= GC_MSD_GOT_NODE_REFS | GC_MSD_VISITED;
+      else m->flags |= GC_MSD_VISITED;
+    }
+  }
+}
+
+static void gc_unlink_msnode_shared (struct multiset_data *msd,
+				     struct rbstack_ptr *track,
+				     int got_node_refs)
+{
+  struct rbstack_ptr rbstack = *track;
+  union msnode *unlinked_node;
+
+  /* Note: Similar code in unlink_msnode. */
+
+  if (got_node_refs) {
+    union msnode *prev, *next;
+    unlinked_node = RBNODE (RBSTACK_PEEK (rbstack));
+    prev = low_multiset_prev (unlinked_node);
+    next = low_multiset_next (unlinked_node);
+    low_rb_unlink_without_move (PHDR (&msd->root), &rbstack, 1);
+    ADD_TO_FREE_LIST (msd, unlinked_node);
+    unlinked_node->i.ind.type = T_DELETED;
+    unlinked_node->i.prev = (struct msnode_ind *) prev;
+    unlinked_node->i.ind.u.ptr = next;
+  }
+
+  else {
+    unlinked_node =
+      RBNODE (low_rb_unlink_with_move (
+		PHDR (&msd->root), &rbstack, 1,
+		msd->flags & MULTISET_INDVAL ?
+		sizeof (struct msnode_indval) : sizeof (struct msnode_ind)));
+    ADD_TO_FREE_LIST (msd, unlinked_node);
+    unlinked_node->i.ind.type = PIKE_T_UNKNOWN;
+    unlinked_node->i.prev = NULL;
+    msd->size--;
+  }
+
+  *track = rbstack;
+}
+
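+/* GC_RECURSE_MSD_IN_USE is used when there are direct external refs
+ * to the msd: the svalues are recursed in place and nothing is
+ * unlinked, so the block is left structurally untouched. */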
+#define GC_RECURSE_MSD_IN_USE(MSD, RECURSE_FN, IND_TYPES, VAL_TYPES) do { \
+    union msnode *node = low_multiset_first (MSD);			\
+    IND_TYPES = msd->ind_types;						\
+    if (node) {								\
+      struct svalue ind;						\
+									\
+      if (msd->flags & MULTISET_INDVAL)					\
+	do {								\
+	  low_use_multiset_index (node, ind);				\
+	  if (!IS_DESTRUCTED (&ind) && RECURSE_FN (&ind, 1)) {		\
+	    DO_IF_DEBUG (fatal ("Didn't expect an svalue zapping now.\n")); \
+	  }								\
+	  RECURSE_FN (&node->iv.val, 1);				\
+	  VAL_TYPES |= 1 << node->iv.val.type;				\
+	} while ((node = low_multiset_next (node)));			\
+									\
+      else								\
+	do {								\
+	  low_use_multiset_index (node, ind);				\
+	  if (!IS_DESTRUCTED (&ind) && RECURSE_FN (&ind, 1)) {		\
+	    DO_IF_DEBUG (fatal ("Didn't expect an svalue zapping now.\n")); \
+	  }								\
+	} while ((node = low_multiset_next (node)));			\
+    }									\
+  } while (0)
+
+/* This macro assumes that the msd isn't "in use", i.e. there are no
+ * external references directly to it. In that case we can zap svalues
+ * in it even if the multiset_data block is shared. */
+#define GC_RECURSE(MSD, GOT_NODE_REFS, REC_NODE_I, REC_NODE_IV, TYPE,	\
+		   IND_TYPES, VAL_TYPES) do {				\
+    struct rb_node_hdr *node = HDR (MSD->root);				\
+    if (node) {								\
+      struct svalue ind;						\
+      int remove;							\
+      RBSTACK_INIT (rbstack);						\
+      LOW_RB_TRACK_FIRST (rbstack, node);				\
+									\
+      if (msd->flags & MULTISET_INDVAL)					\
+	do {								\
+	  low_use_multiset_index (RBNODE (node), ind);			\
+	  REC_NODE_IV ((&ind), (&RBNODE (node)->iv.val),		\
+		       remove,						\
+		       PIKE_CONCAT (TYPE, _svalues),			\
+		       PIKE_CONCAT (TYPE, _weak_svalues),		\
+		       PIKE_CONCAT (TYPE, _without_recurse),		\
+		       PIKE_CONCAT (TYPE, _weak_without_recurse));	\
+	  if (remove) {							\
+	    gc_unlink_msnode_shared (MSD, &rbstack, GOT_NODE_REFS);	\
+	    node = RBSTACK_PEEK (rbstack);				\
+	  }								\
+	  else {							\
+	    IND_TYPES |= 1 << ind.type;					\
+	    VAL_TYPES |= 1 << RBNODE (node)->iv.val.type;		\
+	    LOW_RB_TRACK_NEXT (rbstack, node);				\
+	  }								\
+	} while (node);							\
+									\
+      else								\
+	do {								\
+	  low_use_multiset_index (RBNODE (node), ind);			\
+	  REC_NODE_I ((&ind),						\
+		      remove,						\
+		      PIKE_CONCAT (TYPE, _svalues),			\
+		      PIKE_CONCAT (TYPE, _weak_svalues));		\
+	  if (remove) {							\
+	    gc_unlink_msnode_shared (MSD, &rbstack, GOT_NODE_REFS);	\
+	    node = RBSTACK_PEEK (rbstack);				\
+	  }								\
+	  else {							\
+	    IND_TYPES |= 1 << ind.type;					\
+	    LOW_RB_TRACK_NEXT (rbstack, node);				\
+	  }								\
+	} while (node);							\
+    }									\
+  } while (0)
+
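+/* The GC_REC_* macros below are plugged into GC_RECURSE as REC_NODE_I
+ * (index only) and REC_NODE_IV (index-value) handlers for the various
+ * weak flag combinations. They set REMOVE nonzero when the entry
+ * should be unlinked from the tree. */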
+#define GC_REC_I_WEAK_NONE(IND, REMOVE, N_REC, W_REC) do {		\
+    REMOVE = N_REC (IND, 1);						\
+  } while (0)
+
+#define GC_REC_I_WEAK_IND(IND, REMOVE, N_REC, W_REC) do {		\
+    REMOVE = W_REC (IND, 1);						\
+  } while (0)
+
+#define GC_REC_IV_WEAK_NONE(IND, VAL, REMOVE, N_REC, W_REC, N_TST, W_TST) do { \
+    if ((REMOVE = N_REC (IND, 1)))					\
+      gc_free_svalue (VAL);						\
+    else								\
+      N_REC (VAL, 1);							\
+  } while (0)
+
+#define GC_REC_IV_WEAK_IND(IND, VAL, REMOVE, N_REC, W_REC, N_TST, W_TST) do { \
+    if ((REMOVE = W_REC (IND, 1)))					\
+      gc_free_svalue (VAL);						\
+    else								\
+      N_REC (VAL, 1);							\
+  } while (0)
+
+#define GC_REC_IV_WEAK_VAL(IND, VAL, REMOVE, N_REC, W_REC, N_TST, W_TST) do { \
+    if ((REMOVE = N_TST (IND))) /* Don't recurse now. */		\
+      gc_free_svalue (VAL);						\
+    else if ((REMOVE = W_REC (VAL, 1)))					\
+      gc_free_svalue (IND);						\
+    else								\
+      N_REC (IND, 1); /* Now we can recurse the index. */		\
+  } while (0)
+
+#define GC_REC_IV_WEAK_BOTH(IND, VAL, REMOVE, N_REC, W_REC, N_TST, W_TST) do { \
+    if ((REMOVE = W_TST (IND))) /* Don't recurse now. */		\
+      gc_free_svalue (VAL);						\
+    else if ((REMOVE = W_REC (VAL, 1)))					\
+      gc_free_svalue (IND);						\
+    else								\
+      W_REC (IND, 1); /* Now we can recurse the index. */		\
+  } while (0)
+
+void gc_mark_multiset_as_referenced (struct multiset *l)
+{
+  if (gc_mark (l)) {
+    struct multiset_data *msd = l->msd;
+
+    if (l == gc_mark_multiset_pos)
+      gc_mark_multiset_pos = l->next;
+    if (l == gc_internal_multiset)
+      gc_internal_multiset = l->next;
+    else {
+      DOUBLEUNLINK (first_multiset, l);
+      DOUBLELINK (first_multiset, l); /* Linked in first. */
+    }
+
+    if (gc_mark (msd) && msd->root &&
+	((msd->ind_types | msd->val_types) & BIT_COMPLEX)) {
+      struct marker *m = get_marker (msd);
+      TYPE_FIELD ind_types = 0, val_types = 0;
+
+      if (m->refs < msd->refs) {
+	/* Must leave the multiset data untouched if there are direct
+	 * external refs to it. */
+	GC_RECURSE_MSD_IN_USE (msd, gc_mark_svalues, ind_types, val_types);
+	gc_assert_checked_as_nonweak (msd);
+      }
+
+      else {
+	switch (msd->flags & MULTISET_WEAK) {
+	  case 0:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_NONE, GC_REC_IV_WEAK_NONE,
+			gc_mark, ind_types, val_types);
+	    gc_assert_checked_as_nonweak (msd);
+	    break;
+	  case MULTISET_WEAK_INDICES:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_IND, GC_REC_IV_WEAK_IND,
+			gc_mark, ind_types, val_types);
+	    gc_assert_checked_as_weak (msd);
+	    break;
+	  case MULTISET_WEAK_VALUES:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_NONE, GC_REC_IV_WEAK_VAL,
+			gc_mark, ind_types, val_types);
+	    gc_assert_checked_as_weak (msd);
+	    break;
+	  default:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_IND, GC_REC_IV_WEAK_BOTH,
+			gc_mark, ind_types, val_types);
+	    gc_assert_checked_as_weak (msd);
+	    break;
+	}
+
+	if (msd->refs == 1 && DO_SHRINK (msd, 0)) {
+	  /* Only shrink the multiset if it isn't shared, or else we
+	   * can end up with larger memory consumption since the
+	   * shrunk data blocks won't be shared. */
+	  l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size), 0);
+	  msd = l->msd;
+	}
+      }
+
+      msd->ind_types = ind_types;
+      if (msd->flags & MULTISET_INDVAL) msd->val_types = val_types;
+    }
+  }
+}
+
+void gc_mark_all_multisets (void)
+{
+  gc_mark_multiset_pos = gc_internal_multiset;
+  while (gc_mark_multiset_pos) {
+    struct multiset *l = gc_mark_multiset_pos;
+    gc_mark_multiset_pos = l->next;
+    if (gc_is_referenced (l)) gc_mark_multiset_as_referenced (l);
+  }
+}
+
+void real_gc_cycle_check_multiset (struct multiset *l, int weak)
+{
+  GC_CYCLE_ENTER (l, weak) {
+    struct multiset_data *msd = l->msd;
+
+    if (msd->root && ((msd->ind_types | msd->val_types) & BIT_COMPLEX)) {
+      struct marker *m = get_marker (msd);
+      TYPE_FIELD ind_types = 0, val_types = 0;
+
+      if (m->refs < msd->refs) {
+	/* Must leave the multiset data untouched if there are direct
+	 * external refs to it. */
+	GC_RECURSE_MSD_IN_USE (msd, gc_cycle_check_svalues, ind_types, val_types);
+	gc_assert_checked_as_nonweak (msd);
+      }
+
+      else {
+	switch (msd->flags & MULTISET_WEAK) {
+	  case 0:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_NONE, GC_REC_IV_WEAK_NONE,
+			gc_cycle_check, ind_types, val_types);
+	    gc_assert_checked_as_nonweak (msd);
+	    break;
+	  case MULTISET_WEAK_INDICES:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_IND, GC_REC_IV_WEAK_IND,
+			gc_cycle_check, ind_types, val_types);
+	    gc_assert_checked_as_weak (msd);
+	    break;
+	  case MULTISET_WEAK_VALUES:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_NONE, GC_REC_IV_WEAK_VAL,
+			gc_cycle_check, ind_types, val_types);
+	    gc_assert_checked_as_weak (msd);
+	    break;
+	  default:
+	    GC_RECURSE (msd, m->flags & GC_MSD_GOT_NODE_REFS,
+			GC_REC_I_WEAK_IND, GC_REC_IV_WEAK_BOTH,
+			gc_cycle_check, ind_types, val_types);
+	    gc_assert_checked_as_weak (msd);
+	    break;
+	}
+
+	if (msd->refs == 1 && DO_SHRINK (msd, 0)) {
+	  /* Only shrink the multiset if it isn't shared, or else we
+	   * can end up with larger memory consumption since the
+	   * shrunk data blocks won't be shared. */
+	  l->msd = resize_multiset_data (msd, ALLOC_SIZE (msd->size), 0);
+	  msd = l->msd;
+	}
+      }
+
+      msd->ind_types = ind_types;
+      if (msd->flags & MULTISET_INDVAL) msd->val_types = val_types;
+    }
+  } GC_CYCLE_LEAVE;
+}
+
+void gc_cycle_check_all_multisets (void)
+{
+  struct multiset *l;
+  for (l = gc_internal_multiset; l; l = l->next) {
+    real_gc_cycle_check_multiset (l, 0);
+    gc_cycle_run_queue();
+  }
+}
+
+void gc_zap_ext_weak_refs_in_multisets (void)
+{
+  gc_mark_multiset_pos = first_multiset;
+  while (gc_mark_multiset_pos != gc_internal_multiset && gc_ext_weak_refs) {
+    struct multiset *l = gc_mark_multiset_pos;
+    gc_mark_multiset_pos = l->next;
+    gc_mark_multiset_as_referenced (l);
+  }
+  discard_queue (&gc_mark_queue);
+}
+
+void gc_free_all_unreferenced_multisets (void)
+{
+  struct multiset *l, *next;
+
+  for (l = gc_internal_multiset; l; l = next) {
+    if (gc_do_free (l)) {
+      struct multiset_data *msd = l->msd;
+      if (msd->root) {
+	/* Replace the msd with an empty one to avoid recursion during free. */
+	l->msd = msd->flags & MULTISET_INDVAL ? &empty_indval_msd : &empty_ind_msd;
+	add_ref (l->msd);
+	if (!sub_ref (msd)) free_multiset_data (msd);
+      }
+      gc_free_extra_ref (l);
+      SET_NEXT_AND_FREE (l, free_multiset);
+    }
+    else next = l->next;
+  }
+}
+
+void init_multiset()
+{
+#ifdef PIKE_DEBUG
+  union msnode test;
+  HDR (&test)->flags = 0;
+  test.i.ind.type = (1 << 8) - 1;
+  test.i.ind.subtype = (1 << 16) - 1;
+  test.i.ind.u.refs = (INT32 *) (ptrdiff_t) -1;
+  if (HDR (&test)->flags & (MULTISET_FLAG_MASK))
+    fatal ("The ind svalue overlays the flags field in an unexpected way.\n");
+  HDR (&test)->flags |= RB_FLAG_MASK;
+  if (test.i.ind.type & MULTISET_FLAG_MARKER)
+    fatal ("The ind svalue overlays the flags field in an unexpected way.\n");
+  test.i.ind.type |= MULTISET_FLAG_MARKER;
+  if ((test.i.ind.type & ~MULTISET_FLAG_MASK) != (1 << 8) - 1)
+    fatal ("The ind svalue overlays the flags field in an unexpected way.\n");
+#endif
+#ifndef HAVE_UNION_INIT
+  svalue_int_one.u.integer = 1;
+#endif
+  init_multiset_blocks();
+}
+
+/* Pike might exit without calling this. */
+void exit_multiset()
+{
+#ifdef PIKE_DEBUG
+  if (svalue_int_one.type != T_INT ||
+      svalue_int_one.subtype != NUMBER_NUMBER ||
+      svalue_int_one.u.integer != 1)
+    fatal ("svalue_int_one has been changed.\n");
+#endif
+  free_all_multiset_blocks();
+}
+
+#if defined (PIKE_DEBUG) || defined (TEST_MULTISET)
+
+union msnode *debug_check_msnode (struct multiset *l, ptrdiff_t nodepos,
+				  int allow_deleted, char *file, int line)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  if (l->node_refs <= 0)
+    fatal ("%s:%d: Got a node reference to a multiset without any.\n", file, line);
+  if (nodepos < 0 || nodepos >= msd->allocsize)
+    fatal ("%s:%d: Node offset %"PRINTPTRDIFFT"d "
+	   "outside storage for multiset (size %d).\n",
+	   file, line, nodepos, msd->allocsize);
+  node = OFF2MSNODE (msd, nodepos);
+  if (node->i.ind.type == PIKE_T_UNKNOWN)
+    fatal ("%s:%d: Invalid node offset %"PRINTPTRDIFFT"d.\n", file, line, nodepos);
+  if (!allow_deleted && node->i.ind.type == T_DELETED)
+    fatal ("%s:%d: Node at offset %"PRINTPTRDIFFT"d is deleted.\n", file, line, nodepos);
+  return node;
+}
+
+void check_low_msnode (struct multiset_data *msd, union msnode *node,
+		       int allow_free)
+{
+  union msnode *n;
+  if (node < msd->nodes ||
+      node >= (msd->flags & MULTISET_INDVAL ?
+	       IVNODE (NODE_AT (msd, msnode_indval, msd->allocsize)) :
+	       INODE (NODE_AT (msd, msnode_ind, msd->allocsize))))
+    fatal ("Node outside storage for multiset.\n");
+  if ((char *) node - (char *) msd->nodes !=
+      (msd->flags & MULTISET_INDVAL ?
+       (&node->iv - &msd->nodes->iv) * (ptrdiff_t) ((struct msnode_indval *) NULL + 1) :
+       (&node->i - &msd->nodes->i) * (ptrdiff_t) ((struct msnode_ind *) NULL + 1)))
+    fatal ("Unaligned node in storage for multiset.\n");
+  if (!allow_free && node->i.ind.type == PIKE_T_UNKNOWN)
+    fatal ("Node is free.\n");
+}
+
+void check_multiset (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  int size = 0, indval;
+
+  /* Check refs and multiset link list. */
+
+  if (l->refs <= 0)
+    fatal ("Multiset has incorrect refs %d.\n", l->refs);
+  if (l->node_refs < 0)
+    fatal ("Multiset has incorrect node_refs %d.\n", l->node_refs);
+  if (!msd)
+    fatal ("Multiset has no data block.\n");
+  indval = msd->flags & MULTISET_INDVAL;
+  if (msd->refs <= 0)
+    fatal ("Multiset data block has incorrect refs %d.\n", msd->refs);
+  if (msd->noval_refs < 0)
+    fatal ("Multiset data block has negative noval_refs %d.\n", msd->noval_refs);
+  if (msd->noval_refs > msd->refs)
+    fatal ("Multiset data block has more noval_refs %d than refs %d.\n",
+	   msd->noval_refs, msd->refs);
+
+  if (l->next && l->next->prev != l)
+    fatal("multiset->next->prev != multiset.\n");
+  if (l->prev) {
+    if (l->prev->next != l)
+      fatal ("multiset->prev->next != multiset.\n");
+  }
+  else
+    if (first_multiset != l)
+      fatal ("multiset->prev == 0 but first_multiset != multiset.\n");
+
+  /* Check all node pointers, the tree structure and the type hints. */
+
+  {
+    TYPE_FIELD ind_types = 0, val_types = indval ? 0 : BIT_INT;
+
+    if (msd->root) {
+      int pos;
+      struct svalue ind;
+
+      check_low_msnode (msd, msd->root, 0);
+
+#define WITH_NODES_BLOCK(TYPE, OTHERTYPE, IND, INDVAL)			\
+      struct TYPE *node;						\
+      for (pos = msd->allocsize; pos-- > 0;) {				\
+	node = NODE_AT (msd, TYPE, pos);				\
+									\
+	switch (node->ind.type) {					\
+	  case T_DELETED:						\
+	    if (node->next) check_low_msnode (msd, (union msnode *) node->next, 1); \
+	    if (DELETED_PREV ((union msnode *) node))			\
+	      check_low_msnode (					\
+		msd, (union msnode *) DELETED_PREV ((union msnode *) node), 0); \
+	    if (DELETED_NEXT ((union msnode *) node))			\
+	      check_low_msnode (					\
+		msd, (union msnode *) DELETED_NEXT ((union msnode *) node), 0); \
+	    break;							\
+									\
+	  case PIKE_T_UNKNOWN:						\
+	    if (node->prev) fatal ("Free node got garbage in prev pointer.\n"); \
+	    if (node->next) check_low_msnode (msd, (union msnode *) node->next, 1); \
+	    break;							\
+									\
+	  default:							\
+	    size++;							\
+	    ind_types |= 1 << low_use_multiset_index ((union msnode *) node, ind)->type; \
+	    INDVAL (val_types |= 1 << node->val.type);			\
+	    if (node->prev) check_low_msnode (msd, (union msnode *) node->prev, 0); \
+	    if (node->next) check_low_msnode (msd, (union msnode *) node->next, 0); \
+	}								\
+      }
+
+      DO_WITH_NODES (msd);
+
+#undef WITH_NODES_BLOCK
+
+#ifdef PIKE_DEBUG
+      debug_check_rb_tree (HDR (msd->root),
+			   msd->flags & MULTISET_INDVAL ?
+			   (dump_data_fn *) debug_dump_indval_data :
+			   (dump_data_fn *) debug_dump_ind_data,
+			   msd);
+#endif
+    }
+
+    if (ind_types & ~msd->ind_types)
+      fatal ("Multiset indices type field lacked 0x%x.\n", ind_types & ~msd->ind_types);
+    if (val_types & ~msd->val_types)
+      fatal ("Multiset values type field lacked 0x%x.\n", val_types & ~msd->val_types);
+  }
+
+  /* Check the free list. */
+
+  {
+    int deletedsize = 0, freesize = 0;
+    union msnode *node;
+    for (node = msd->free_list; node; node = NEXT_FREE (node)) {
+      check_low_msnode (msd, node, 1);
+      if (node->i.ind.type == PIKE_T_UNKNOWN) break;
+      if (node->i.ind.type != T_DELETED)
+	fatal ("Multiset node in free list got invalid type %d.\n", node->i.ind.type);
+      deletedsize++;
+    }
+
+    if (node) {
+      freesize++;
+      for (node = NEXT_FREE (node); node; node = NEXT_FREE (node)) {
+	freesize++;
+	check_low_msnode (msd, node, 1);
+	if (node->i.ind.type == T_DELETED)
+	  fatal ("Multiset data got deleted node after free node on free list.\n");
+	if (node->i.ind.type != PIKE_T_UNKNOWN)
+	  fatal ("Multiset node in free list got invalid type %d.\n", node->i.ind.type);
+      }
+    }
+
+    if (msd->size != size + deletedsize)
+      fatal ("Multiset data got size %d but tree is %d nodes and %d are deleted.\n",
+	     msd->size, size, deletedsize);
+
+    if (freesize != msd->allocsize - msd->size)
+      fatal ("Multiset data should have %d free nodes but got %d on free list.\n",
+	     msd->allocsize - msd->size, freesize);
+
+    if (!l->node_refs && deletedsize)
+      fatal ("Multiset data got deleted nodes but no node refs.\n");
+  }
+
+  /* Check the order. This can call pike code, so we need to be extra careful. */
+
+  if (msd->root && !Pike_in_gc) {
+    JMP_BUF recovery;
+    add_msnode_ref (l);
+    if (SETJMP (recovery))
+      call_handle_error();
+
+    else {
+      /* msd duplicated to avoid SETJMP clobber (or at least silence
+       * gcc warnings about it). */
+      struct multiset_data *msd = l->msd;
+      union msnode *node, *next;
+      struct svalue tmp1, tmp2;
+      ptrdiff_t nextpos;
+
+      node = low_multiset_first (msd);
+      low_use_multiset_index (node, tmp1);
+#ifdef PIKE_DEBUG
+      check_svalue (&tmp1);
+      if (indval) check_svalue (&node->iv.val);
+#endif
+
+      if (msd->cmp_less.type == T_INT)
+	for (; (next = low_multiset_next (node)); node = next) {
+	  int cmp_res;
+	  low_use_multiset_index (next, tmp2);
+	  if (!IS_DESTRUCTED (&tmp2)) {
+#ifdef PIKE_DEBUG
+	    check_svalue (&tmp2);
+	    if (indval) check_svalue (&node->iv.val);
+#endif
+
+	    nextpos = MSNODE2OFF (msd, next);
+	    INTERNAL_CMP (low_use_multiset_index (node, tmp1), &tmp2, cmp_res);
+	    if (cmp_res > 0)
+	      fatal ("Order failure in multiset data with internal order.\n");
+
+	    if (l->msd != msd) {
+	      msd = l->msd;
+	      next = OFF2MSNODE (msd, nextpos);
+	      while (next && next->i.ind.type == T_DELETED)
+		next = DELETED_PREV (next);
+	      if (!next) {
+		next = low_multiset_first (msd);
+		if (!next) goto order_check_done;
+	      }
+	    }
+	  }
+	}
+
+      else
+	for (; (next = low_multiset_next (node)); node = next) {
+	  low_push_multiset_index (next);
+	  if (!IS_DESTRUCTED (sp - 1)) {
+#ifdef PIKE_DEBUG
+	    check_svalue (sp - 1);
+	    if (indval) check_svalue (&node->iv.val);
+#endif
+	    low_push_multiset_index (node);
+
+	    nextpos = MSNODE2OFF (msd, next);
+	    EXTERNAL_CMP (&msd->cmp_less);
+	    if (!IS_ZERO (sp - 1))
+	      fatal ("Order failure in multiset data with external order.\n");
+	    pop_stack();
+
+	    if (l->msd != msd) {
+	      msd = l->msd;
+	      next = OFF2MSNODE (msd, nextpos);
+	      while (next && next->i.ind.type == T_DELETED)
+		next = DELETED_PREV (next);
+	      if (!next) {
+		next = low_multiset_first (msd);
+		if (!next) goto order_check_done;
+	      }
+	    }
+	  }
+	}
+
+    order_check_done:
+      ;
+    }
+
+    UNSETJMP (recovery);
+    sub_msnode_ref (l);
+  }
+}
+
+void check_all_multisets (void)
+{
+  struct multiset *l;
+  for (l = first_multiset; l; l = l->next)
+    check_multiset (l);
+}
+
+static void debug_dump_ind_data (struct msnode_ind *node,
+				 struct multiset_data *msd)
+{
+  struct svalue tmp;
+  print_svalue (stderr, low_use_multiset_index (INODE (node), tmp));
+  fprintf (stderr, " [%"PRINTPTRDIFFT"d]", MSNODE2OFF (msd, INODE (node)));
+}
+
+static void debug_dump_indval_data (struct msnode_indval *node,
+				    struct multiset_data *msd)
+{
+  struct svalue tmp;
+  print_svalue (stderr, low_use_multiset_index (IVNODE (node), tmp));
+  fputs (": ", stderr);
+  print_svalue (stderr, &node->val);
+  fprintf (stderr, " [%"PRINTPTRDIFFT"d]", MSNODE2OFF (msd, IVNODE (node)));
+}
+
+void debug_dump_multiset (struct multiset *l)
+{
+  struct multiset_data *msd = l->msd;
+  union msnode *node;
+  struct svalue tmp;
+
+  fprintf (stderr, "Refs=%d, node_refs=%d, next=%p, prev=%p\nmsd=%p",
+	   l->refs, l->node_refs, l->next, l->prev, msd);
+
+  if ((ptrdiff_t) msd & 3)
+    fputs (" (unaligned)\n", stderr);
+
+  else {
+    fprintf (stderr, ", refs=%d, noval_refs=%d, flags=0x%x, size=%d, allocsize=%d\n",
+	     msd->refs, msd->noval_refs, msd->flags, msd->size, msd->allocsize);
+
+    if (msd == &empty_ind_msd) fputs ("msd is empty_ind_msd\n", stderr);
+    else if (msd == &empty_indval_msd) fputs ("msd is empty_indval_msd\n", stderr);
+
+    fputs ("Indices type field =", stderr);
+    debug_dump_type_field (msd->ind_types);
+    fputs ("\nValues type field =", stderr);
+    debug_dump_type_field (msd->val_types);
+
+    if (msd->cmp_less.type == T_INT)
+      fputs ("\nInternal compare function\n", stderr);
+    else {
+      fputs ("\nCompare function = ", stderr);
+      print_svalue (stderr, &msd->cmp_less);
+      fputc ('\n', stderr);
+    }
+
+#ifdef PIKE_DEBUG
+    debug_dump_rb_tree (HDR (msd->root),
+			msd->flags & MULTISET_INDVAL ?
+			(dump_data_fn *) debug_dump_indval_data :
+			(dump_data_fn *) debug_dump_ind_data,
+			msd);
+#else
+    simple_describe_multiset (l);
+#endif
+
+    if (msd->free_list && msd->free_list->i.ind.type == T_DELETED) {
+      union msnode *node = msd->free_list;
+      fputs ("Deleted nodes:", stderr);
+      do {
+	if (node != msd->free_list) fputc (',', stderr);
+	fprintf (stderr, " %p [%"PRINTPTRDIFFT"d]", node, MSNODE2OFF (msd, node));
+      } while ((node = NEXT_FREE (node)) && node->i.ind.type == T_DELETED);
+    }
+  }
+}
+
+static void debug_multiset_fatal (struct multiset *l, const char *fmt, ...)
+{
+  struct multiset_data *msd = l->msd;
+  va_list args;
+  va_start (args, fmt);
+  (void) VFPRINTF (stderr, fmt, args);
+  fprintf (stderr, "Dumping multiset @ %p: ", l);
+  debug_dump_multiset (l);
+  debug_fatal ("\r");
+}
+
+#ifdef TEST_MULTISET
+
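+/* Self test helpers: look up sp[-1] with the given multiset_find_*
+ * function and check that the expected integer index is found (or
+ * not found). A successful find leaves a node ref on l, which is
+ * released with sub_msnode_ref again. */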
+#define TEST_FIND(fn, exp) do {						\
+    node = PIKE_CONCAT (multiset_, fn) (l, sp - 1);			\
+    if (node < 0)							\
+      multiset_fatal (l, #fn " failed to find %d (%d).\n", exp, i);	\
+    if (access_msnode (l, node)->i.ind.u.integer != exp)		\
+      multiset_fatal (l, #fn " failed to find %d - got %d instead (%d).\n", \
+		      exp, access_msnode (l, node)->i.ind.u.integer, i); \
+    sub_msnode_ref (l);							\
+  } while (0)
+
+#define TEST_NOT_FIND(fn) do {						\
+    node = PIKE_CONCAT (multiset_, fn) (l, sp - 1);			\
+    if (node >= 0)							\
+      multiset_fatal (l, #fn " failed to not find %d - got %d (%d).\n",	\
+		      sp[-1].u.integer,					\
+		      access_msnode (l, node)->i.ind.u.integer, i);	\
+  } while (0)
+
+#define TEST_STEP_FIND(fn, dir, exp) do {				\
+    add_msnode_ref (l);		/* Cheating. */				\
+    node = PIKE_CONCAT (multiset_, dir) (l, node);			\
+    if (node < 0)							\
+      multiset_fatal (l, "Failed to step " #dir " to %d after " #fn	\
+		      " of %d (%d).\n", exp, sp[-1].u.integer, i);	\
+    if (access_msnode (l, node)->i.ind.u.integer != exp)		\
+      multiset_fatal (l, "Failed to step " #dir " to %d after " #fn	\
+		      " of %d - got %d instead (%d).\n",		\
+		      exp, sp[-1].u.integer,				\
+		      access_msnode (l, node)->i.ind.u.integer, i);	\
+    sub_msnode_ref (l);							\
+  } while (0)
+
+#define TEST_STEP_NOT_FIND(fn, dir) do {				\
+    add_msnode_ref (l);		/* Cheating. */				\
+    node = PIKE_CONCAT (multiset_, dir) (l, node);			\
+    if (node >= 0)							\
+      multiset_fatal (l, "Failed to step " #dir " to end after " #fn	\
+		      " of %d - got %d (%d).\n",			\
+		      sp[-1].u.integer,					\
+		      access_msnode (l, node)->i.ind.u.integer, i);	\
+    sub_msnode_ref (l);							\
+  } while (0)
+
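+/* Simple-minded equality check used as reference in the self test:
+ * steps through both multisets in order and compares the integer
+ * indices (and values, if any) directly. */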
+static int naive_test_equal (struct multiset *a, struct multiset *b)
+{
+  union msnode *na, *nb;
+  struct svalue sa, sb;
+  if ((a->msd->flags & MULTISET_INDVAL) != (b->msd->flags & MULTISET_INDVAL)) return 0;
+  na = low_multiset_first (a->msd);
+  nb = low_multiset_first (b->msd);
+  while (na && nb) {
+    low_use_multiset_index (na, sa);
+    low_use_multiset_index (nb, sb);
+    if (sa.type != sb.type || sa.u.integer != sb.u.integer ||
+	(a->msd->flags & MULTISET_INDVAL && (
+	  na->iv.val.type != nb->iv.val.type ||
+	  na->iv.val.u.integer != nb->iv.val.u.integer))) return 0;
+    na = low_multiset_next (na);
+    nb = low_multiset_next (nb);
+  }
+  return !(na || nb);
+}
+
+static void debug_merge_fatal (struct multiset *a, struct multiset *b,
+			       struct multiset *exp, struct multiset *got,
+			       const char *fmt, ...)
+{
+  va_list args;
+  va_start (args, fmt);
+  (void) VFPRINTF (stderr, fmt, args);
+  fputs ("Dumping a: ", stderr);
+  debug_dump_multiset (a);
+  fputs ("Dumping b: ", stderr);
+  debug_dump_multiset (b);
+  fputs ("Dumping expected: ", stderr);
+  debug_dump_multiset (exp);
+  fputs ("Dumping got: ", stderr);
+  debug_dump_multiset (got);
+  debug_fatal ("\r");
+}
+
+#include "builtin_functions.h"
+#include "constants.h"
+#include "mapping.h"
+
+void test_multiset (void)
+{
+  int pass, i, j, v, vv, old_d_flag = d_flag;
+  struct svalue *less_efun, *greater_efun, tmp, *orig_sp = sp;
+  struct array *arr;
+  struct multiset *l, *l2;
+  ptrdiff_t node;
+  d_flag = 3;
+
+  push_svalue (simple_mapping_string_lookup (get_builtin_constants(), "`<"));
+  less_efun = sp - 1;
+  push_svalue (simple_mapping_string_lookup (get_builtin_constants(), "`>"));
+  greater_efun = sp - 1;
+
+  for (pass = 0; pass < 2; pass++) {
+    push_int (1);
+    push_int (1);
+    push_int (2);
+    push_int (4);
+    push_int (5);
+    push_int (5);
+    push_int (7);
+    push_int (8);
+    push_int (11);
+    push_int (14);
+    push_int (15);
+    push_int (15);
+    f_aggregate (12);
+
+    for (i = 1*2*3*4*5*6*7*8*9; i > 0; i--) {
+      if (!(i % 1000)) fprintf (stderr, "ind %s %d         \r",
+				pass ? "cmp_less" : "internal", i);
+
+      l = allocate_multiset (0, 0, pass ? less_efun : NULL);
+      stack_dup();
+      push_int (i);
+      f_permute (2);
+      arr = sp[-1].u.array;
+
+      for (j = 0; j < 12; j++) {
+	multiset_insert_2 (l, &arr->item[j], NULL, 1);
+	check_multiset (l);
+      }
+      if (multiset_sizeof (l) != 9)
+	multiset_fatal (l, "Size is wrong: %d (%d)\n", multiset_sizeof (l), i);
+
+      push_int (5);
+      TEST_FIND (find_eq, 5);
+      TEST_FIND (find_lt, 4);
+      TEST_FIND (find_gt, 7);
+      TEST_FIND (find_le, 5);
+      TEST_FIND (find_ge, 5);
+      pop_stack();
+
+      push_int (6);
+      TEST_NOT_FIND (find_eq);
+      TEST_FIND (find_lt, 5);
+      TEST_FIND (find_gt, 7);
+      TEST_FIND (find_le, 5);
+      TEST_FIND (find_ge, 7);
+      pop_stack();
+
+      push_int (0);
+      TEST_NOT_FIND (find_eq);
+      TEST_NOT_FIND (find_lt);
+      TEST_FIND (find_gt, 1);
+      TEST_NOT_FIND (find_le);
+      TEST_FIND (find_ge, 1);
+      pop_stack();
+
+      push_int (1);
+      TEST_FIND (find_eq, 1);
+      TEST_NOT_FIND (find_lt);
+      TEST_FIND (find_gt, 2);
+      TEST_FIND (find_le, 1);
+      TEST_FIND (find_ge, 1);
+      pop_stack();
+
+      push_int (15);
+      TEST_FIND (find_eq, 15);
+      TEST_FIND (find_lt, 14);
+      TEST_NOT_FIND (find_gt);
+      TEST_FIND (find_le, 15);
+      TEST_FIND (find_ge, 15);
+      pop_stack();
+
+      push_int (17);
+      TEST_NOT_FIND (find_eq);
+      TEST_FIND (find_lt, 15);
+      TEST_NOT_FIND (find_gt);
+      TEST_FIND (find_le, 15);
+      TEST_NOT_FIND (find_ge);
+      pop_stack();
+
+      l2 = l;
+#if 0
+      l2 = copy_multiset (l);
+      check_multiset (l2);
+#endif
+      for (j = 0, v = 0; j < 12; j++) {
+	v += !!multiset_delete_2 (l2, &arr->item[j], NULL);
+	if (multiset_find_eq (l2, &arr->item[j]) >= 0)
+	  multiset_fatal (l2, "Entry %d not deleted (%d).\n",
+			  arr->item[j].u.integer, i);
+	check_multiset (l2);
+      }
+      if (v != 9 || l2->msd->root)
+	multiset_fatal (l2, "Wrong number of entries deleted: %d (%d)\n", v, i);
+
+#if 0
+      free_multiset (l2);
+#endif
+      free_multiset (l);
+      pop_stack();
+    }
+    pop_stack();
+  }
+
+  for (pass = 0; pass < 2; pass++) {
+    push_int (1);
+    push_int (1);
+    push_int (4);
+    push_int (5);
+    push_int (5);
+    push_int (7);
+    push_int (15);
+    push_int (15);
+    f_aggregate (8);
+
+    for (i = 1*2*3*4*5*6*7*8; i > 0; i--) {
+      if (!(i % 1000)) fprintf (stderr, "indval %s %d         \r",
+				pass ? "cmp_less" : "internal", i);
+
+      stack_dup();
+      push_int (i);
+      f_permute (2);
+      arr = sp[-1].u.array;
+
+      {
+	ptrdiff_t nodes[8];
+	l = allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+	push_int (17);
+
+	for (j = 0; j < 8; j++) {
+	  int node_ref = (node = multiset_last (l)) >= 0;
+	  for (; node >= 0; node = multiset_prev (l, node)) {
+	    if (get_multiset_value (l, node)->u.integer <=
+		arr->item[j].u.integer) break;
+	  }
+	  nodes[j] = multiset_add_after (l, node, sp - 1, &arr->item[j]);
+	  if (node_ref) sub_msnode_ref (l);
+	  if (nodes[j] < 0) {
+	    if (node < 0)
+	      multiset_fatal (l, "Failed to add %d:%d first: %d\n",
+			      sp[-1].u.integer, arr->item[j].u.integer, nodes[j]);
+	    else
+	      multiset_fatal (l, "Failed to add %d:%d after %d:%d: %d\n",
+			      sp[-1].u.integer, arr->item[j].u.integer,
+			      use_multiset_index (l, node, tmp)->u.integer,
+			      get_multiset_value (l, node)->u.integer,
+			      nodes[j]);
+	  }
+	  add_msnode_ref (l);
+	  check_multiset (l);
+	}
+	if (multiset_sizeof (l) != 8)
+	  multiset_fatal (l, "Size is wrong: %d (%d)\n", multiset_sizeof (l), i);
+
+	add_msnode_ref (l);
+	for (j = 0; j < 8; j++) {
+	  multiset_delete_node (l, nodes[j]);
+	  check_multiset (l);
+	}
+	sub_msnode_ref (l);
+	if (multiset_sizeof (l))
+	  multiset_fatal (l, "Whole tree not deleted (%d)\n", i);
+	free_multiset (l);
+	pop_stack();
+      }
+
+      l = allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+
+      for (j = 0; j < 8; j++) {
+	push_int (arr->item[j].u.integer * 8 + j);
+	multiset_add (l, &arr->item[j], sp - 1);
+	check_multiset (l);
+	pop_stack();
+      }
+
+      for (j = 0, v = 0, node = multiset_first (l);
+	   node >= 0; node = multiset_next (l, node), j++) {
+	push_multiset_index (l, node);
+	if (v >= get_multiset_value (l, node)->u.integer)
+	  multiset_fatal (l, "Failed to sort values (%d).\n", i);
+	v = get_multiset_value (l, node)->u.integer;
+	pop_stack();
+      }
+      if (j != 8 || multiset_sizeof (l) != j)
+	multiset_fatal (l, "Size is wrong: %d (%d)\n", j, i);
+      sub_msnode_ref (l);
+
+      push_int (5);
+      TEST_FIND (find_eq, 5);
+      TEST_STEP_FIND (find_eq, next, 7);
+      TEST_FIND (find_lt, 4);
+      TEST_FIND (find_gt, 7);
+      TEST_FIND (find_le, 5);
+      TEST_STEP_FIND (find_le, next, 7);
+      TEST_FIND (find_ge, 5);
+      TEST_STEP_FIND (find_ge, prev, 4);
+      pop_stack();
+
+      push_int (6);
+      TEST_NOT_FIND (find_eq);
+      TEST_FIND (find_lt, 5);
+      TEST_FIND (find_gt, 7);
+      TEST_FIND (find_le, 5);
+      TEST_STEP_FIND (find_le, next, 7);
+      TEST_FIND (find_ge, 7);
+      TEST_STEP_FIND (find_ge, prev, 5);
+      pop_stack();
+
+      push_int (0);
+      TEST_NOT_FIND (find_eq);
+      TEST_NOT_FIND (find_lt);
+      TEST_FIND (find_gt, 1);
+      TEST_STEP_NOT_FIND (find_gt, prev);
+      TEST_NOT_FIND (find_le);
+      TEST_FIND (find_ge, 1);
+      TEST_STEP_FIND (find_ge, next, 1);
+      pop_stack();
+
+      push_int (1);
+      TEST_FIND (find_eq, 1);
+      TEST_STEP_FIND (find_eq, next, 4);
+      TEST_NOT_FIND (find_lt);
+      TEST_FIND (find_gt, 4);
+      TEST_FIND (find_le, 1);
+      TEST_STEP_FIND (find_le, next, 4);
+      TEST_FIND (find_ge, 1);
+      TEST_STEP_NOT_FIND (find_ge, prev);
+      pop_stack();
+
+      push_int (15);
+      TEST_FIND (find_eq, 15);
+      TEST_STEP_NOT_FIND (find_eq, next);
+      TEST_FIND (find_lt, 7);
+      TEST_NOT_FIND (find_gt);
+      TEST_FIND (find_le, 15);
+      TEST_STEP_NOT_FIND (find_le, next);
+      TEST_FIND (find_ge, 15);
+      TEST_STEP_FIND (find_ge, prev, 7);
+      pop_stack();
+
+      push_int (17);
+      TEST_NOT_FIND (find_eq);
+      TEST_FIND (find_lt, 15);
+      TEST_STEP_NOT_FIND (find_lt, next);
+      TEST_NOT_FIND (find_gt);
+      TEST_FIND (find_le, 15);
+      TEST_STEP_FIND (find_le, prev, 15);
+      TEST_NOT_FIND (find_ge);
+      pop_stack();
+
+      l2 = copy_multiset (l);
+      check_multiset (l2);
+      if (!naive_test_equal (l, l2))
+	multiset_fatal (l2, "Copy not equal to original (%d).\n", i);
+
+      push_int (-1);
+      for (j = 0; j < 8; j++) {
+	multiset_insert_2 (l2, &arr->item[j], sp - 1, 0);
+	if (multiset_sizeof (l2) != multiset_sizeof (l))
+	  multiset_fatal (l2, "Duplicate entry %d inserted (%d).\n",
+			  arr->item[j].u.integer, i);
+	if (get_multiset_value (
+	      l2, multiset_find_eq (l2, &arr->item[j]))->u.integer == -1)
+	  multiset_fatal (l2, "Insert replaced last entry %d (%d).\n",
+			  arr->item[j].u.integer, i);
+	sub_msnode_ref (l2);
+      }
+      for (j = 0; j < 8; j++) {
+	multiset_insert_2 (l2, &arr->item[j], sp - 1, 1);
+	if (multiset_sizeof (l2) != multiset_sizeof (l))
+	  multiset_fatal (l2, "Duplicate entry %d inserted (%d).\n",
+			  arr->item[j].u.integer, i);
+	if (get_multiset_value (
+	      l2, multiset_find_eq (l2, &arr->item[j]))->u.integer != -1)
+	  multiset_fatal (l2, "Insert didn't replace last entry %d (%d).\n",
+			  arr->item[j].u.integer, i);
+	sub_msnode_ref (l2);
+      }
+      pop_stack();
+
+      for (v = 0; multiset_sizeof (l2); v++) {
+	add_msnode_ref (l2);
+	multiset_delete_node (l2, MSNODE2OFF (l2->msd, l2->msd->root));
+	check_multiset (l2);
+      }
+      if (v != 8)
+	multiset_fatal (l2, "Wrong number of entries deleted: %d (%d)\n", v, i);
+      free_multiset (l2);
+
+      for (j = 0, v = 0; j < 8; j++) {
+	if (!multiset_delete_2 (l, &arr->item[j], &tmp))
+	  multiset_fatal (l, "Entry %d not deleted (%d).\n",
+			  arr->item[j].u.integer, i);
+	if ((node = multiset_find_eq (l, &arr->item[j])) >= 0) {
+	  if (get_multiset_value (l, node)->u.integer >= tmp.u.integer)
+	    multiset_fatal (l, "Last entry %d not deleted (%d).\n",
+			    arr->item[j].u.integer, i);
+	  sub_msnode_ref (l);
+	}
+	free_svalue (&tmp);
+	check_multiset (l);
+      }
+
+      free_multiset (l);
+      pop_stack();
+    }
+    pop_stack();
+  }
+
+  for (pass = 0; pass < 2; pass++) {
+    int max = 1000000;
+    l = allocate_multiset (0, 0, pass ? less_efun : NULL);
+    srand (0);
+#ifdef RB_STATS
+    reset_rb_stats();
+#endif
+    for (i = max, v = 0; i > 0; i--) {
+      if (!(i % 10000)) fprintf (stderr, "grow %s %d, %d duplicates         \r",
+				 pass ? "cmp_less" : "internal", i, v);
+      push_int (rand());
+      if (multiset_find_eq (l, sp - 1) >= 0) {
+	v++;
+	sub_msnode_ref (l);
+      }
+      multiset_add (l, sp - 1, NULL);
+      pop_stack();
+    }
+#ifdef RB_STATS
+    fputc ('\n', stderr), print_rb_stats (1);
+#endif
+    check_multiset (l);
+    srand (0);
+    for (i = max; i > 0; i--) {
+      if (!(i % 10000)) fprintf (stderr, "shrink %s %d                   \r",
+				 pass ? "cmp_less" : "internal", i);
+      push_int (rand());
+      if (!multiset_delete (l, sp - 1))
+	fatal ("Pseudo-random sequence didn't repeat.\n");
+      pop_stack();
+    }
+#ifdef RB_STATS
+    fputc ('\n', stderr), print_rb_stats (1);
+#endif
+    if (multiset_sizeof (l))
+      multiset_fatal (l, "Multiset not empty.\n");
+    free_multiset (l);
+  }
+
+  if (1) {
+    int max = 400;
+    struct array *arr = allocate_array_no_init (0, max);
+    struct svalue *sval;
+    srand (0);
+    for (i = j = 0; i < max; i++) {
+      if (!(i % 10)) fprintf (stderr, "maketree %d         \r",
+			      max * 10 - arr->size);
+
+      l = mkmultiset_2 (arr, i & 2 ? arr : NULL, i & 1 ? less_efun : NULL);
+      check_multiset (l);
+      multiset_set_cmp_less (l, i & 4 ? less_efun : NULL);
+      check_multiset (l);
+      multiset_set_flags (l, i & 8 ? MULTISET_INDVAL : 0);
+      check_multiset (l);
+      multiset_set_cmp_less (l, greater_efun);
+      check_multiset (l);
+
+      if ((node = multiset_first (l)) >= 0) {
+	int pos = 0, try_get = rand() % arr->size;
+	for (; node >= 0; node = multiset_next (l, node), pos++)
+	  if (pos == try_get) {
+	    if ((v = use_multiset_index (
+		   l, multiset_get_nth (l, try_get), tmp)->u.integer) !=
+		arr->size - try_get - 1)
+	      multiset_fatal (l, "Element @ %d is %d, but %d was expected (%d).\n",
+			      try_get, v, arr->size - try_get - 1, i);
+	    sub_msnode_ref (l);
+	    if ((v = get_multiset_value (l, node)->u.integer) !=
+		(vv = ((i & (8|2)) == (8|2) ? arr->size - try_get - 1 : 1)))
+	      multiset_fatal (l, "Element @ %d got value %d, but %d was expected (%d).\n",
+			      try_get, v, vv, i);
+	    break;
+	  }
+	sub_msnode_ref (l);
+      }
+
+      free_multiset (l);
+      arr = resize_array (arr, j + 10);
+      for (; j < arr->size; j++)
+	ITEM (arr)[j].u.integer = j;
+    }
+    free_array (arr);
+  }
+
+  for (pass = 0; pass < 1; pass++) {
+    struct multiset *a =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    struct multiset *b =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    struct multiset *and =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    struct multiset *or =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    struct multiset *add =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    struct multiset *sub =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    struct multiset *xor =
+      allocate_multiset (0, MULTISET_INDVAL, pass ? less_efun : NULL);
+    int action = 0;
+
+    srand (0);
+    for (i = 5000; i >= 0; i--, action = action % 6 + 1) {
+      int nr = rand();		/* Assumes we keep within one period. */
+
+      if (!(i % 100)) fprintf (stderr, "merge %d         \r", i);
+
+      switch (action) {
+	case 1:			/* Unique index added to a only. */
+	  push_int (nr);
+	  push_int (1);
+	  multiset_add (a, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+	  multiset_add (sub, sp - 2, sp - 1);
+	  multiset_add (xor, sp - 2, sp - 1);
+	  goto add_unique;
+
+	case 2:			/* Unique index added to b only. */
+	  push_int (nr);
+	  push_int (2);
+	  multiset_add (b, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+	  multiset_add (xor, sp - 2, sp - 1);
+	  goto add_unique;
+
+	case 3:			/* Unique index added to a and b. */
+	  push_int (nr);
+	  push_int (1);
+	  multiset_add (a, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+	  pop_stack();
+	  push_int (2);
+	  multiset_add (b, sp - 2, sp - 1);
+	  multiset_add (and, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+
+      add_unique:
+	  if (multiset_lookup (or, sp - 2))
+	    multiset_fatal (or, "Duplicate index %d not expected here.\n", nr);
+	  multiset_insert_2 (or, sp - 2, sp - 1, 0);
+	  pop_stack();
+	  pop_stack();
+	  break;
+
+	case 4:			/* Duplicate index added to a only. */
+	  nr = low_use_multiset_index (
+	    low_multiset_get_nth (
+	      sub->msd, nr % multiset_sizeof (sub)), tmp)->u.integer;
+	  push_int (nr);
+	  push_int (1);
+	  multiset_add (a, sp - 2, sp - 1);
+	  multiset_add (or, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+	  multiset_add (sub, sp - 2, sp - 1);
+	  multiset_add (xor, sp - 2, sp - 1);
+	  pop_stack();
+	  pop_stack();
+	  break;
+
+	case 5:			/* Duplicate index added to b only. */
+	  nr = low_use_multiset_index (
+	    low_multiset_get_nth (
+	      b->msd, nr % multiset_sizeof (b)), tmp)->u.integer;
+	  push_int (nr);
+	  push_int (2);
+	  multiset_add (b, sp - 2, sp - 1);
+	  multiset_add (or, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+	  multiset_add (xor, sp - 2, sp - 1);
+	  pop_stack();
+	  pop_stack();
+	  break;
+
+	case 6:			/* Duplicate index added to a and b. */
+	  nr = low_use_multiset_index (
+	    low_multiset_get_nth (
+	      b->msd, nr % multiset_sizeof (b)), tmp)->u.integer;
+	  push_int (nr);
+	  push_int (1);
+	  multiset_add (a, sp - 2, sp - 1);
+	  node = multiset_find_lt (add, sp - 2);
+	  if ((nr = multiset_add_after (add, node, sp - 2, sp - 1)) < 0)
+	    multiset_fatal (add, "Failed to add %d:1 after %d: %d.\n",
+			    sp[-2].u.integer, node, nr);
+	  if (node >= 0) sub_msnode_ref (add);
+	  pop_stack();
+	  push_int (2);
+	  multiset_add (b, sp - 2, sp - 1);
+	  multiset_add (and, sp - 2, sp - 1);
+	  multiset_add (or, sp - 2, sp - 1);
+	  multiset_add (add, sp - 2, sp - 1);
+	  pop_stack();
+	  pop_stack();
+	  break;
+      }
+
+      if (i % 10) continue;
+
+      l = merge_multisets (a, b, PIKE_ARRAY_OP_AND);
+      if (!naive_test_equal (and, l))
+	debug_merge_fatal (a, b, and, l, "Invalid 'and' merge (%d).\n", i);
+      free_multiset (l);
+      l = copy_multiset (a);
+      merge_multisets (l, b, PIKE_ARRAY_OP_AND | PIKE_MERGE_DESTR_A);
+      if (!naive_test_equal (and, l))
+	debug_merge_fatal (a, b, and, l, "Invalid destructive 'and' merge (%d).\n", i);
+      free_multiset (l);
+
+      l = merge_multisets (a, b, PIKE_ARRAY_OP_OR);
+      if (!naive_test_equal (or, l))
+	debug_merge_fatal (a, b, or, l, "Invalid 'or' merge (%d).\n", i);
+      free_multiset (l);
+      l = copy_multiset (a);
+      merge_multisets (l, b, PIKE_ARRAY_OP_OR | PIKE_MERGE_DESTR_A);
+      if (!naive_test_equal (or, l))
+	debug_merge_fatal (a, b, or, l, "Invalid destructive 'or' merge (%d).\n", i);
+      free_multiset (l);
+
+      l = merge_multisets (a, b, PIKE_ARRAY_OP_ADD);
+      if (!naive_test_equal (add, l))
+	debug_merge_fatal (a, b, add, l, "Invalid 'add' merge (%d).\n", i);
+      free_multiset (l);
+      l = copy_multiset (a);
+      merge_multisets (l, b, PIKE_ARRAY_OP_ADD | PIKE_MERGE_DESTR_A);
+      if (!naive_test_equal (add, l))
+	debug_merge_fatal (a, b, add, l, "Invalid destructive 'add' merge (%d).\n", i);
+      free_multiset (l);
+
+      l = merge_multisets (a, b, PIKE_ARRAY_OP_SUB);
+      if (!naive_test_equal (sub, l))
+	debug_merge_fatal (a, b, sub, l, "Invalid 'sub' merge (%d).\n", i);
+      free_multiset (l);
+      l = copy_multiset (a);
+      merge_multisets (l, b, PIKE_ARRAY_OP_SUB | PIKE_MERGE_DESTR_A);
+      if (!naive_test_equal (sub, l))
+	debug_merge_fatal (a, b, sub, l, "Invalid destructive 'sub' merge (%d).\n", i);
+      free_multiset (l);
+
+      l = merge_multisets (a, b, PIKE_ARRAY_OP_XOR);
+      if (!naive_test_equal (xor, l))
+	debug_merge_fatal (a, b, xor, l, "Invalid 'xor' merge (%d).\n", i);
+      free_multiset (l);
+      l = copy_multiset (a);
+      merge_multisets (l, b, PIKE_ARRAY_OP_XOR | PIKE_MERGE_DESTR_A);
+      if (!naive_test_equal (xor, l))
+	debug_merge_fatal (a, b, xor, l, "Invalid destructive 'xor' merge (%d).\n", i);
+      free_multiset (l);
+
+      check_multiset (a);
+    }
+
+    free_multiset (a);
+    free_multiset (b);
+    free_multiset (and);
+    free_multiset (or);
+    free_multiset (add);
+    free_multiset (sub);
+    free_multiset (xor);
+  }
+
+  pop_n_elems (2);
+  if (orig_sp != sp)
+    fatal ("Stack wrong: %"PRINTPTRDIFFT"d extra elements.\n", sp - orig_sp);
+  fprintf (stderr, "                            \r");
+  d_flag = old_d_flag;
+}
+
+#endif /* TEST_MULTISET */
+
+#endif /* PIKE_DEBUG || TEST_MULTISET */
+
+#else  /* PIKE_NEW_MULTISETS */
+
 /*\
 ||| This file a part of Pike, and is copyright by Fredrik Hubinette
 ||| Pike is distributed as GPL (General Public License)
 ||| See the files COPYING and DISCLAIMER for more information.
 \*/
 /**/
+
 #include "global.h"
 #include "array.h"
 #include "multiset.h"
@@ -17,7 +5164,7 @@
 #include "gc.h"
 #include "security.h"
 
-RCSID("$Id: multiset.c,v 1.39 2001/09/25 05:55:11 hubbe Exp $");
+RCSID("$Id: multiset.c,v 1.40 2001/12/10 02:08:16 mast Exp $");
 
 struct multiset *first_multiset;
 
@@ -444,4 +5591,4 @@ int multiset_is_constant(struct multiset *m,
   return array_is_constant(m->ind, p);
 }
 
-
+#endif	/* PIKE_NEW_MULTISETS */
diff --git a/src/multiset.h b/src/multiset.h
index 48bdca8841cec242d9803f5fc530491f212018e9..143b46e9412be1f5b017d94e0601222159b4c279 100644
--- a/src/multiset.h
+++ b/src/multiset.h
@@ -1,3 +1,425 @@
+#ifndef MULTISET_H
+#define MULTISET_H
+
+#ifdef PIKE_NEW_MULTISETS
+
+/* Multisets using rbtree.
+ *
+ * Created by Martin Stjernholm 2001-05-07
+ *
+ * $Id: multiset.h,v 1.20 2001/12/10 02:08:17 mast Exp $
+ */
+
+/* #define TEST_MULTISET */
+
+#include "rbtree.h"
+#include "block_alloc_h.h"
+
+/* Note: Don't access the ind svalue (or at least not its type field)
+ * in the following directly, since the rbtree flags overlay that. Use
+ * assign_multiset_index_no_free() or similar instead. */
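+
+/* A hedged usage sketch (not from the sources; "node" is assumed to
+ * point into some multiset_data block): read an index via the
+ * accessor defined further down instead of through node->i.ind
+ * directly, since the type field carries rbtree flag bits. The
+ * accessor strips MULTISET_FLAG_MASK and adds a reference that
+ * free_svalue later releases.
+ *
+ *   union msnode *node = ...;
+ *   struct svalue ind;
+ *   low_assign_multiset_index_no_free (&ind, node);
+ *   ...use ind as an ordinary svalue...
+ *   free_svalue (&ind);
+ */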
+
+struct msnode_ind
+{
+  struct msnode_ind *prev, *next; /* Must be first. */
+  struct svalue ind;		/* Must be second. */
+};
+
+struct msnode_indval
+{
+  struct msnode_indval *prev, *next; /* Must be first. */
+  struct svalue ind;		/* Must be second. */
+  struct svalue val;
+};
+
+union msnode
+{
+  struct msnode_ind i;
+  struct msnode_indval iv;
+};
+
+#define MULTISET_FLAG_MARKER	0x1000
+#define MULTISET_FLAG_MASK	(RB_FLAG_MASK | MULTISET_FLAG_MARKER)
+/* The flag marker is used to reliably catch direct uses of the ind
+ * type field. */
+
+struct multiset_data
+{
+  INT32 refs, noval_refs;
+#ifdef PIKE_RUN_UNLOCKED
+#error multiset_data has not been adapted for running unlocked.
+#endif
+  union msnode *root, *free_list;
+  struct svalue cmp_less;
+  INT32 size, allocsize;	/* size includes any T_DELETED nodes (see below). */
+  TYPE_FIELD ind_types;
+  TYPE_FIELD val_types;		/* Always BIT_INT in valueless multisets. */
+  INT16 flags;
+  union msnode nodes[1];
+};
+
+struct multiset
+{
+  PIKE_MEMORY_OBJECT_MEMBERS;
+  struct multiset_data *msd;
+  struct multiset *next, *prev;
+  INT32 node_refs;
+};
+
+/* Data structure notes:
+ *
+ * o  The node free list through multiset_data.free_list is singly
+ *    linked by the next pointers. Nodes on the free list must have
+ *    ind.type set to either PIKE_T_UNKNOWN or T_DELETED (see below).
+ *
+ *    Note that the free nodes might have T_DELETED set even when
+ *    there are no node refs left to the multiset_data block and
+ *    there's only one reference to it.
+ *
+ * o  multiset_data.cmp_less.type is T_INT when the internal set order
+ *    is used.
+ *
+ * o  multiset_data.refs counts the number of "independent" references
+ *    to the data block. When it's greater than one, the data block
+ *    must always be copied if anything but a node value is changed.
+ *    Changes of node values might be allowed depending on
+ *    multiset_data.noval_refs.
+ *
+ *    A direct pointer to or into the data block can only be kept if
+ *    it's accounted for by this ref counter, otherwise an indirection
+ *    via a multiset struct must be used.
+ *
+ *    When the gc runs, it won't touch a multiset_data block that has
+ *    direct external references, i.e. if multiset_data.refs is
+ *    greater than the number of multiset objects that point to
+ *    it.
+ *
+ * o  multiset_data.noval_refs is the number of the multiset_data.refs
+ *    references that don't lock the value part of the data block.
+ *    Thus, values may be changed without copy if (refs - noval_refs)
+ *    is 1 (if you yourself have a value lock) or 0 (if you don't). The
+ *    functions below that change values only assume that the caller
+ *    has a value lock.
+ *
+ *    Note that you should not do any operation that might cause a
+ *    copy of the data block (which includes insert and delete) when
+ *    you've increased noval_refs, since it then won't be possible for
+ *    the copying function to know whether one noval_ref should be
+ *    moved to the copy or not. All copy operations let the noval_refs
+ *    stay with the original.
+ *
+ * o  multiset.node_refs counts the number of references to nodes in
+ *    the multiset. The references are array offsets in
+ *    multiset.msd->nodes, so the data block may be reallocated, but
+ *    nodes may not be moved around within the block. Values but not
+ *    indices may be changed, nodes may be added and removed, and the
+ *    order may be changed.
+ *
+ *    Nodes that are removed while node_refs is nonzero are linked in
+ *    first on the free list as usual, but ind.type is set to
+ *    T_DELETED. They are thereby flagged to not be used again. When
+ *    node_refs reaches zero and there's only one reference to the
+ *    multiset_data block, the type is changed to PIKE_T_UNKNOWN for
+ *    all these nodes.
+ *
+ *    The prev and ind.u.ptr pointers in T_DELETED nodes point to the
+ *    previous and next neighbor, respectively, of the node at the
+ *    time it was deleted. Thus the relative position of the node is
+ *    remembered even after it has been deleted.
+ */
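+
+/* A hedged illustration (not from the implementation, and assuming a
+ * single entry equal to key) of the node_refs behaviour described
+ * above: as long as a node ref is held, a deleted node keeps its slot
+ * and is merely flagged T_DELETED, which msnode_is_deleted (declared
+ * further down) reports.
+ *
+ *   ptrdiff_t nodepos = multiset_find_eq (l, key);
+ *   if (nodepos >= 0) {                 (find_eq added a node ref)
+ *     multiset_delete (l, key);
+ *     ...msnode_is_deleted (l, nodepos) is now true; the slot is not
+ *        reused while the ref is held...
+ *     sub_msnode_ref (l);               (the slot may now be reclaimed)
+ *   }
+ */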
+
+/* The following are compatible with PIKE_WEAK_INDICES and PIKE_WEAK_VALUES. */
+#define MULTISET_WEAK_INDICES	2
+#define MULTISET_WEAK_VALUES	4
+#define MULTISET_WEAK		6
+
+#define MULTISET_INDVAL		8
+
+extern struct multiset *first_multiset;
+extern struct multiset *gc_internal_multiset;
+
+PMOD_EXPORT extern struct svalue svalue_int_one;
+
+PMOD_EXPORT void multiset_clear_node_refs (struct multiset *l);
+
+#ifdef PIKE_DEBUG
+/* To get good type checking. */
+static inline union msnode *msnode_check (union msnode *x)
+  {return x;}
+#else
+#define msnode_check(X) ((union msnode *) (X))
+#endif
+
+#define MULTISET_STEP_FUNC(FUNC, NODE)					\
+  ((union msnode *) FUNC ((struct rb_node_hdr *) msnode_check (NODE)))
+#define low_multiset_first(MSD) MULTISET_STEP_FUNC (rb_first, (MSD)->root)
+#define low_multiset_last(MSD) MULTISET_STEP_FUNC (rb_last, (MSD)->root)
+#define low_multiset_prev(NODE) MULTISET_STEP_FUNC (rb_prev, NODE)
+#define low_multiset_next(NODE) MULTISET_STEP_FUNC (rb_next, NODE)
+#define low_multiset_get_nth(MSD, N)					\
+  ((union msnode *) rb_get_nth ((struct rb_node_hdr *) (MSD)->root, (N)))
+union msnode *low_multiset_find_eq (struct multiset *l, struct svalue *key);
+
+#define low_assign_multiset_index_no_free(TO, NODE) do {		\
+    struct svalue *_ms_index_to_ = (TO);				\
+    *_ms_index_to_ = msnode_check (NODE)->i.ind;			\
+    _ms_index_to_->type &= ~MULTISET_FLAG_MASK;				\
+    add_ref_svalue (_ms_index_to_);					\
+  } while (0)
+#define low_assign_multiset_index(TO, NODE) do {			\
+    struct svalue *_ms_index_to2_ = (TO);				\
+    free_svalue (_ms_index_to2_);					\
+    low_assign_multiset_index_no_free (_ms_index_to2_, (NODE));		\
+  } while (0)
+#define low_push_multiset_index(NODE)					\
+  low_assign_multiset_index_no_free (Pike_sp++, (NODE))
+#define low_use_multiset_index(NODE, VAR)				\
+  ((VAR) = msnode_check (NODE)->i.ind,					\
+   (VAR).type &= ~MULTISET_FLAG_MASK,					\
+   &(VAR))
+
+#define low_get_multiset_value(MSD, NODE)				\
+  ((MSD)->flags & MULTISET_INDVAL ? &(NODE)->iv.val : &svalue_int_one)
+#define low_set_multiset_value(MSD, NODE, VAL) do {			\
+    if ((MSD)->flags & MULTISET_INDVAL)					\
+      assign_svalue (&(NODE)->iv.val, VAL);				\
+  } while (0)
+
+#define OFF2MSNODE(MSD, OFFSET)						\
+  ((MSD)->flags & MULTISET_INDVAL ?					\
+   (union msnode *) (&(MSD)->nodes->iv + (OFFSET)) :			\
+   (union msnode *) (&(MSD)->nodes->i + (OFFSET)))
+#define MSNODE2OFF(MSD, NODE)						\
+  ((MSD)->flags & MULTISET_INDVAL ?					\
+   &(NODE)->iv - &(MSD)->nodes->iv : &(NODE)->i - &(MSD)->nodes->i)
+
+PMOD_EXPORT INT32 multiset_sizeof (struct multiset *l);
+#define l_sizeof(L) multiset_sizeof (L)
+#define multiset_ind_types(L) ((L)->msd->ind_types)
+#define multiset_val_types(L) ((L)->msd->val_types)
+#define multiset_get_flags(L) ((L)->msd->flags)
+#define multiset_get_cmp_less(L) (&(L)->msd->cmp_less)
+#define multiset_indval(L) ((L)->msd->flags & MULTISET_INDVAL)
+
+/* This is somewhat faster than using multiset_sizeof just to
+ * check whether the multiset is empty. */
+#define multiset_is_empty(L) (!(L)->msd->root)
+
+PMOD_PROTO void really_free_multiset (struct multiset *l);
+
+#define free_multiset(L) do {						\
+    struct multiset *_ms_ = (L);					\
+    debug_malloc_touch (_ms_);						\
+    if (!sub_ref (_ms_)) really_free_multiset (_ms_);			\
+  } while (0)
+
+#ifdef PIKE_DEBUG
+
+void check_low_msnode (struct multiset_data *msd, union msnode *node, int allow_free);
+union msnode *debug_check_msnode (
+  struct multiset *l, ptrdiff_t nodepos, int allow_deleted,
+  char *file, int line);
+#define check_msnode(L, NODEPOS, ALLOW_DELETED)				\
+  debug_check_msnode ((L), (NODEPOS), (ALLOW_DELETED), __FILE__, __LINE__)
+#define access_msnode(L, NODEPOS)					\
+  check_msnode ((L), (NODEPOS), 0)
+
+#else
+
+#define check_msnode(L, NODEPOS, ALLOW_DELETED)
+#define access_msnode(L, NODEPOS) OFF2MSNODE ((L)->msd, (NODEPOS))
+
+#endif
+
+BLOCK_ALLOC(multiset, 511)
+
+/* See rbtree.h for a description of the operations.
+ *
+ * If cmp_less is used, it's a function pointer used as `< to compare
+ * the entries, otherwise the internal set order is used. `< need not
+ * define a total order for the possible indices; if neither a < b nor
+ * b < a is true then a and b are considered equal orderwise. The
+ * order between such indices is arbitrary and stable. The orderwise
+ * equality doesn't affect searches on equality, however; if several
+ * orderwise equal values are found, then they are searched linearly
+ * backwards until one is found which is equal to the key according to
+ * `==.
+ *
+ * It's possible to keep references to individual nodes. They consist
+ * of the node offset within the multiset data block, which together
+ * with the multiset struct gives access to the node. Use add_msnode_ref
+ * when you store a node reference and sub_msnode_ref when you throw
+ * it away. The multiset_find_*, multiset_first, multiset_last and
+ * multiset_get_nth functions do add_msnode_ref for you (if they
+ * return a match). Other functions, like multiset_insert_2, don't,
+ * even though they might return a node offset.
+ *
+ * msnode_is_deleted tells whether the referenced node has been
+ * deleted. The relative position of a deleted node is remembered by
+ * keeping pointers to the neighbors it had when it was deleted. A
+ * "defensive" strategy is used when a deleted node is used in a
+ * function: If going forward then the previous neighbor is followed
+ * until a nondeleted node is found, which is then used as the start
+ * node for the forward movement. Vice versa in the backward
+ * direction. This has the effect that if nodes are added and removed
+ * in a multiset that is being traversed in some direction, then no
+ * newly added nodes in the vicinity of the current one are missed. It
+ * also has the effect that the node returned by multiset_next for a
+ * deleted node might be before the one returned by multiset_prev.
+ *
+ * Since the functions might run Pike code when comparing entries
+ * (even when cmp_less isn't used), the multiset may change during the
+ * search. If that happens during a destructive operation, the
+ * operation is redone as necessary to ensure that the change has been
+ * made in the multiset that is current upon return. This normally has
+ * no caller-visible effects, except for multiset_add_after, which
+ * might fail to add the requested entry (it returns less than zero in
+ * that case).
+ */
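+
+/* A minimal lookup sketch under the rules above (the helper name is
+ * made up for illustration): multiset_find_eq adds a node ref on a
+ * match, which must be dropped with sub_msnode_ref once the node has
+ * been used. For plain membership or value queries, multiset_member
+ * and multiset_lookup below avoid the node ref bookkeeping entirely.
+ *
+ *   int lookup_value (struct multiset *l, struct svalue *key,
+ *                     struct svalue *val)
+ *   {
+ *     ptrdiff_t nodepos = multiset_find_eq (l, key);
+ *     if (nodepos < 0) return 0;
+ *     assign_multiset_value_no_free (val, l, nodepos);
+ *     sub_msnode_ref (l);        (drop the ref added by find_eq)
+ *     return 1;
+ *   }
+ */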
+
+/* Returns the node offset, or -1 if no match was found. */
+PMOD_EXPORT ptrdiff_t multiset_find_eq (struct multiset *l, struct svalue *key);
+PMOD_EXPORT ptrdiff_t multiset_find_lt (struct multiset *l, struct svalue *key);
+PMOD_EXPORT ptrdiff_t multiset_find_gt (struct multiset *l, struct svalue *key);
+PMOD_EXPORT ptrdiff_t multiset_find_le (struct multiset *l, struct svalue *key);
+PMOD_EXPORT ptrdiff_t multiset_find_ge (struct multiset *l, struct svalue *key);
+PMOD_EXPORT ptrdiff_t multiset_first (struct multiset *l);
+PMOD_EXPORT ptrdiff_t multiset_last (struct multiset *l);
+PMOD_EXPORT ptrdiff_t multiset_prev (struct multiset *l, ptrdiff_t nodepos);
+PMOD_EXPORT ptrdiff_t multiset_next (struct multiset *l, ptrdiff_t nodepos);
+
+PMOD_EXPORT extern const char msg_multiset_no_node_refs[];
+
+#define add_msnode_ref(L) do {(L)->node_refs++;} while (0)
+#define sub_msnode_ref(L) do {						\
+    struct multiset *_ms_ = (L);					\
+    DO_IF_DEBUG (							\
+      if (!_ms_->node_refs) fatal (msg_multiset_no_node_refs);		\
+    );									\
+    if (!--_ms_->node_refs && _ms_->msd->refs == 1)			\
+      multiset_clear_node_refs (_ms_);					\
+  } while (0)
+
+PMOD_EXPORT void do_sub_msnode_ref (struct multiset *l);
+PMOD_EXPORT int msnode_is_deleted (struct multiset *l, ptrdiff_t nodepos);
+
+#define assign_multiset_index_no_free(TO, L, NODEPOS) do {		\
+    struct multiset *_ms_ = (L);					\
+    union msnode *_ms_node_ = access_msnode (_ms_, (NODEPOS));		\
+    low_assign_multiset_index_no_free (TO, _ms_node_);			\
+  } while (0)
+#define assign_multiset_index(TO, L, NODEPOS) do {			\
+    struct multiset *_ms_ = (L);					\
+    union msnode *_ms_node_ = access_msnode (_ms_, (NODEPOS));		\
+    low_assign_multiset_index (TO, _ms_node_);				\
+  } while (0)
+#define push_multiset_index(L, NODEPOS)					\
+  assign_multiset_index_no_free (Pike_sp++, (L), (NODEPOS))
+#define use_multiset_index(L, NODEPOS, VAR)				\
+  ((VAR) = access_msnode ((L), (NODEPOS))->i.ind,			\
+   (VAR).type &= ~MULTISET_FLAG_MASK,					\
+   &(VAR))
+
+#define get_multiset_value(L, NODEPOS)					\
+  ((L)->msd->flags & MULTISET_INDVAL ?					\
+   &access_msnode ((L), (NODEPOS))->iv.val : &svalue_int_one)
+#define set_multiset_value(L, NODEPOS, VAL) do {			\
+    if ((L)->msd->flags & MULTISET_INDVAL)				\
+      assign_svalue (&access_msnode ((L), (NODEPOS))->iv.val, VAL);	\
+  } while (0)
+/* Note: It's intentional that the value is silently ignored for
+ * index-only multisets. */
+
+#define assign_multiset_value_no_free(TO, L, NODEPOS)			\
+  assign_svalue_no_free (TO, get_multiset_value (L, NODEPOS))
+#define assign_multiset_value(TO, L, NODEPOS)				\
+  assign_svalue (TO, get_multiset_value (L, NODEPOS))
+#define push_multiset_value(L, NODEPOS)					\
+  push_svalue (get_multiset_value (L, NODEPOS))
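+
+/* A hedged traversal sketch using the accessors above (the function
+ * name is made up; the elided body is whatever work the caller wants
+ * to do): multiset_first adds the node ref that the final
+ * sub_msnode_ref releases, while multiset_next does not add another.
+ *
+ *   void visit_all (struct multiset *l)
+ *   {
+ *     ptrdiff_t nodepos = multiset_first (l);
+ *     if (nodepos >= 0) {
+ *       do {
+ *         push_multiset_index (l, nodepos);
+ *         push_multiset_value (l, nodepos);
+ *         ...work on Pike_sp[-2] (index) and Pike_sp[-1] (value)...
+ *         pop_n_elems (2);
+ *       } while ((nodepos = multiset_next (l, nodepos)) >= 0);
+ *       sub_msnode_ref (l);
+ *     }
+ *   }
+ */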
+
+PMOD_EXPORT struct multiset *allocate_multiset (int allocsize,
+						int flags,
+						struct svalue *cmp_less);
+PMOD_EXPORT void do_free_multiset (struct multiset *l);
+void multiset_fix_type_field (struct multiset *l);
+PMOD_EXPORT void multiset_set_flags (struct multiset *l, int flags);
+PMOD_EXPORT void multiset_set_cmp_less (struct multiset *l,
+					struct svalue *cmp_less);
+PMOD_EXPORT struct multiset *mkmultiset (struct array *indices);
+PMOD_EXPORT struct multiset *mkmultiset_2 (struct array *indices,
+					   struct array *values,
+					   struct svalue *cmp_less);
+PMOD_EXPORT void multiset_insert (struct multiset *l,
+				  struct svalue *ind);
+PMOD_EXPORT ptrdiff_t multiset_insert_2 (struct multiset *l,
+					 struct svalue *ind,
+					 struct svalue *val,
+					 int replace);
+PMOD_EXPORT ptrdiff_t multiset_add (struct multiset *l,
+				    struct svalue *ind,
+				    struct svalue *val);
+PMOD_EXPORT ptrdiff_t multiset_add_after (struct multiset *l,
+					  ptrdiff_t node,
+					  struct svalue *ind,
+					  struct svalue *val);
+PMOD_EXPORT int multiset_delete (struct multiset *l,
+				 struct svalue *ind);
+PMOD_EXPORT int multiset_delete_2 (struct multiset *l,
+				   struct svalue *ind,
+				   struct svalue *removed_val);
+PMOD_EXPORT void multiset_delete_node (struct multiset *l,
+				       ptrdiff_t node);
+PMOD_EXPORT int multiset_member (struct multiset *l,
+				 struct svalue *key);
+PMOD_EXPORT struct svalue *multiset_lookup (struct multiset *l,
+					    struct svalue *key);
+struct array *multiset_indices (struct multiset *l);
+struct array *multiset_values (struct multiset *l);
+struct array *multiset_range_indices (struct multiset *l,
+				      ptrdiff_t beg, ptrdiff_t end);
+struct array *multiset_range_values (struct multiset *l,
+				     ptrdiff_t beg, ptrdiff_t end);
+PMOD_EXPORT void check_multiset_for_destruct (struct multiset *l);
+PMOD_EXPORT struct multiset *copy_multiset (struct multiset *l);
+PMOD_EXPORT struct multiset *merge_multisets (struct multiset *a,
+					      struct multiset *b,
+					      int operation);
+PMOD_EXPORT struct multiset *add_multisets (struct svalue *argp, int count);
+PMOD_EXPORT int multiset_equal_p (struct multiset *a, struct multiset *b,
+				  struct processing *p);
+void describe_multiset (struct multiset *l, struct processing *p, int indent);
+void simple_describe_multiset (struct multiset *l);
+int multiset_is_constant (struct multiset *l, struct processing *p);
+node *make_node_from_multiset (struct multiset *l);
+PMOD_EXPORT void f_aggregate_multiset (int args);
+struct multiset *copy_multiset_recursively (struct multiset *l,
+					    struct processing *p);
+PMOD_EXPORT ptrdiff_t multiset_get_nth (struct multiset *l, size_t n);
+
+unsigned gc_touch_all_multisets (void);
+void gc_check_all_multisets (void);
+void gc_mark_multiset_as_referenced (struct multiset *l);
+void gc_mark_all_multisets (void);
+void gc_zap_ext_weak_refs_in_multisets (void);
+void real_gc_cycle_check_multiset (struct multiset *l, int weak);
+void gc_cycle_check_all_multisets (void);
+void gc_free_all_unreferenced_multisets (void);
+#define gc_cycle_check_multiset(X, WEAK) \
+  gc_cycle_enqueue ((gc_cycle_check_cb *) real_gc_cycle_check_multiset, (X), (WEAK))
+
+#ifdef PIKE_DEBUG
+void check_multiset (struct multiset *l);
+void check_all_multisets (void);
+void debug_dump_multiset (struct multiset *l);
+#endif
+
+void count_memory_in_multisets (INT32 *num, INT32 *size);
+void init_multiset (void);
+void exit_multiset (void);
+void test_multiset (void);
+
+#else  /* PIKE_NEW_MULTISETS */
+
 /*\
 ||| This file a part of Pike, and is copyright by Fredrik Hubinette
 ||| Pike is distributed as GPL (General Public License)
@@ -5,10 +427,8 @@
 \*/
 
 /*
- * $Id: multiset.h,v 1.19 2001/09/25 05:55:12 hubbe Exp $
+ * $Id: multiset.h,v 1.20 2001/12/10 02:08:17 mast Exp $
  */
-#ifndef MULTISET_H
-#define MULTISET_H
 
 #include "las.h"
 
@@ -26,6 +446,11 @@ extern struct multiset *gc_internal_multiset;
 #define free_multiset(L) do{ struct multiset *l_=(L); debug_malloc_touch(l_); if(!sub_ref(l_)) really_free_multiset(l_); }while(0)
 
 #define l_sizeof(L) ((L)->ind->size)
+#define multiset_sizeof(L) l_sizeof (L)
+#define multiset_is_empty(L) (!multiset_sizeof (L))
+#define multiset_ind_types(L) ((L)->ind->type_field)
+#define multiset_get_flags(L) ((L)->ind->flags)
+#define multiset_fix_type_field(L) array_fix_type_field ((L)->ind)
 
 /* Prototypes begin here */
 PMOD_EXPORT int multiset_member(struct multiset *l, struct svalue *ind);
@@ -65,4 +490,6 @@ int multiset_is_constant(struct multiset *m,
 #define gc_cycle_check_multiset(X, WEAK) \
   gc_cycle_enqueue((gc_cycle_check_cb *) real_gc_cycle_check_multiset, (X), (WEAK))
 
+#endif /* PIKE_NEW_MULTISETS */
+
 #endif /* MULTISET_H */
diff --git a/src/opcodes.c b/src/opcodes.c
index b7ab72864d5a51b1c13b8855cb48219e8896bff7..fc9bec817eab1100e5a1720b74c40cb834b87b30 100644
--- a/src/opcodes.c
+++ b/src/opcodes.c
@@ -27,7 +27,7 @@
 #include "bignum.h"
 #include "operators.h"
 
-RCSID("$Id: opcodes.c,v 1.116 2001/09/24 14:58:05 grubba Exp $");
+RCSID("$Id: opcodes.c,v 1.117 2001/12/10 02:08:15 mast Exp $");
 
 void index_no_free(struct svalue *to,struct svalue *what,struct svalue *ind)
 {
@@ -660,18 +660,46 @@ void o_cast(struct pike_type *type, INT32 run_time_type)
       if(run_time_itype != T_MIXED)
       {
 	struct multiset *m;
+#ifdef PIKE_NEW_MULTISETS
+	struct multiset *tmp=sp[-2].u.multiset;
+#else
 	struct array *tmp=sp[-2].u.multiset->ind;
+#endif
 	DECLARE_CYCLIC();
 	
 	if((m=(struct multiset *)BEGIN_CYCLIC(tmp,0)))
 	{
 	  ref_push_multiset(m);
 	}else{
-	  INT32 e;
-	  struct array *a;
 #ifdef PIKE_DEBUG
 	  struct svalue *save_sp=sp+1;
 #endif
+
+#ifdef PIKE_NEW_MULTISETS
+	  ptrdiff_t nodepos;
+	  if (multiset_indval (tmp))
+	    Pike_error ("FIXME: Casting not implemented for multisets with values.\n");
+	  push_multiset (m = allocate_multiset (multiset_sizeof (tmp),
+						multiset_get_flags (tmp),
+						multiset_get_cmp_less (tmp)));
+
+	  SET_CYCLIC_RET(m);
+
+	  if ((nodepos = multiset_first (tmp)) >= 0) {
+	    ONERROR uwp;
+	    SET_ONERROR (uwp, do_sub_msnode_ref, tmp);
+	    do {
+	      push_multiset_index (tmp, nodepos);
+	      o_cast(itype, run_time_itype);
+	      multiset_insert_2 (m, sp - 1, NULL, 0);
+	      pop_stack();
+	    } while ((nodepos = multiset_next (tmp, nodepos)) >= 0);
+	    UNSET_ONERROR (uwp);
+	  }
+
+#else  /* PIKE_NEW_MULTISETS */
+	  INT32 e;
+	  struct array *a;
 	  push_multiset(m=allocate_multiset(a=allocate_array(tmp->size)));
 	  
 	  SET_CYCLIC_RET(m);
@@ -683,11 +711,13 @@ void o_cast(struct pike_type *type, INT32 run_time_type)
 	    array_set_index(a,e,sp-1);
 	    pop_stack();
 	  }
+	  order_multiset(m);
+#endif
+
 #ifdef PIKE_DEBUG
 	  if(save_sp!=sp)
 	    fatal("o_cast left stack droppings.\n");
 #endif
-	  order_multiset(m);
 	}
 	END_CYCLIC();
 	assign_svalue(sp-3,sp-1);
diff --git a/src/operators.c b/src/operators.c
index 54d39e8bad593a58f37bfca127abf1a758ee99cc..48c25dec83469d0c718b17ac9ec04e72f66fa426 100644
--- a/src/operators.c
+++ b/src/operators.c
@@ -6,7 +6,7 @@
 /**/
 #include "global.h"
 #include <math.h>
-RCSID("$Id: operators.c,v 1.142 2001/10/28 18:02:27 nilsson Exp $");
+RCSID("$Id: operators.c,v 1.143 2001/12/10 02:08:16 mast Exp $");
 #include "interpret.h"
 #include "svalue.h"
 #include "multiset.h"
@@ -934,9 +934,25 @@ PMOD_EXPORT void o_subtract(void)
 	  {
 	     struct mapping *m;
 
+#ifdef PIKE_NEW_MULTISETS
+	     int got_cmp_less = !!multiset_get_cmp_less (sp[-1].u.multiset);
+	     struct array *ind = multiset_indices (sp[-1].u.multiset);
+	     pop_stack();
+	     push_array (ind);
+	     if (got_cmp_less)
+	       m=merge_mapping_array_unordered(sp[-2].u.mapping,
+					       sp[-1].u.array,
+					       PIKE_ARRAY_OP_SUB);
+	     else
+	       m=merge_mapping_array_ordered(sp[-2].u.mapping,
+					     sp[-1].u.array,
+					     PIKE_ARRAY_OP_SUB);
+#else
 	     m=merge_mapping_array_ordered(sp[-2].u.mapping,
 					   sp[-1].u.multiset->ind,
 					   PIKE_ARRAY_OP_SUB);
+#endif
+
 	     pop_n_elems(2);
 	     push_mapping(m);
 	     return;
@@ -1180,9 +1196,25 @@ PMOD_EXPORT void o_and(void)
 	   {
 	      struct mapping *m;
 
+#ifdef PIKE_NEW_MULTISETS
+	     int got_cmp_less = !!multiset_get_cmp_less (sp[-1].u.multiset);
+	     struct array *ind = multiset_indices (sp[-1].u.multiset);
+	     pop_stack();
+	     push_array (ind);
+	     if (got_cmp_less)
+	       m=merge_mapping_array_unordered(sp[-2].u.mapping,
+					       sp[-1].u.array,
+					       PIKE_ARRAY_OP_AND);
+	     else
+	       m=merge_mapping_array_ordered(sp[-2].u.mapping,
+					     sp[-1].u.array,
+					     PIKE_ARRAY_OP_AND);
+#else
 	      m=merge_mapping_array_ordered(sp[-2].u.mapping,
 					    sp[-1].u.multiset->ind,
 					    PIKE_ARRAY_OP_AND);
+#endif
+
 	      pop_n_elems(2);
 	      push_mapping(m);
 	      return;
diff --git a/src/pike_types.c b/src/pike_types.c
index 5f36be8b253cb6d03c94552d65096712ec2482ba..381bff87846fe53116a6071a8e65b35e56a780ed 100644
--- a/src/pike_types.c
+++ b/src/pike_types.c
@@ -5,7 +5,7 @@
 \*/
 /**/
 #include "global.h"
-RCSID("$Id: pike_types.c,v 1.182 2001/10/05 01:30:13 hubbe Exp $");
+RCSID("$Id: pike_types.c,v 1.183 2001/12/10 02:08:16 mast Exp $");
 #include <ctype.h>
 #include "svalue.h"
 #include "pike_types.h"
@@ -3945,17 +3945,10 @@ struct pike_type *get_type_of_svalue(struct svalue *s)
     }
     return ret;
        
-  case T_MULTISET:
   case T_ARRAY:
     {
       struct pike_type *arg_type;
-      struct array *a;
-
-      if (s->type == T_MULTISET) {
-	a = s->u.multiset->ind;
-      } else {
-	a = s->u.array;
-      }
+      struct array *a = s->u.array;
 #if 0
       int i;
 
@@ -3981,6 +3974,16 @@ struct pike_type *get_type_of_svalue(struct svalue *s)
       return pop_unfinished_type();
     }
 
+  case T_MULTISET:
+    type_stack_mark();
+    if (multiset_sizeof(s->u.multiset)) {
+      push_type(T_MIXED);
+    }
+    else {
+      push_type(T_ZERO);
+    }
+    push_type(T_MULTISET);
+    return pop_unfinished_type();
 
   case T_MAPPING:
     type_stack_mark();