Skip to content
Snippets Groups Projects
Select Git revision
  • 7dcba661d62d9248ed7b892024a2442b0520016f
  • master default
  • rsa-crt-hardening
  • chacha96
  • fat-library
  • versioned-symbols
  • curve25519
  • dsa-reorg
  • aead-api
  • set_key-changes
  • poly1305
  • aes-reorg
  • nettle-2.7-fixes
  • size_t-changes
  • ecc-support
  • experimental-20050201
  • lsh-1.4.2
  • nettle_3.1.1_release_20150424
  • nettle_3.1_release_20150407
  • nettle_3.1rc3
  • nettle_3.1rc2
  • nettle_3.1rc1
  • nettle_3.0_release_20140607
  • nettle_2.7.1_release_20130528
  • nettle_2.7_release_20130424
  • nettle_2.6_release_20130116
  • nettle_2.5_release_20120707
  • converted-master-branch-to-git
  • nettle_2.4_release_20110903
  • nettle_2.3_release_20110902
  • nettle_2.2_release_20110711
  • nettle_2.1_release_20100725
  • camellia_32bit_20100720
  • nettle_2.0_release_20090608
  • nettle_1.15_release_20061128
  • after_experimental_merge_20060516
  • head_before_experimental_merge_20060516
37 results

aes-encrypt-internal.c

Blame
  • Forked from Nettle / nettle
    Source project has a limited visibility.
    backend.cmod 155.76 KiB
    /* -*- c -*-
    || This file is part of Pike. For copyright information see COPYRIGHT.
    || Pike is distributed under GPL, LGPL and MPL. See the file COPYING
    || for more information.
    */
    
    /*
     * Backend object.
     */
    
    #include "global.h"
    #include "fdlib.h"
    #include "backend.h"
    #include "time_stuff.h"
    #include <errno.h>
    #ifdef HAVE_SYS_TYPES_H
    #include <sys/types.h>
    #endif
    #ifdef HAVE_SYS_PARAM_H
    #include <sys/param.h>
    #endif
    #include <string.h>
    #include "interpret.h"
    #include "object.h"
    #include "pike_error.h"
    #include "fd_control.h"
    #include "main.h"
    #include "callback.h"
    #include "threads.h"
    #include "array.h"
    #include <math.h>
    #include "interpret.h"
    #include "stuff.h"
    #include "bignum.h"
    #include "builtin_functions.h"
    #include "mapping.h"
    #include "svalue.h"
    #include "gc.h"
    #include "module_support.h"
    #include "block_allocator.h"
    
    /*
     * Things to do
     *
     * o what happens to callbacks on destruct?
     *
     *   They will just cease to generate any events. If the callback
     *   container object uses the old callback interface and has added an
     *   extra ref to itself to account for the callback connection it
     *   will become garbage. The new interface fixes this. /mast
     *
     * o automatic callback assignment based on current thread
     *
     *   Sounds very odd to me. /mast
     */
    
    /* For select */
    #ifdef HAVE_SYS_SELECT_H
    #include <sys/select.h>
    #else
    /* BeOS socket (select etc) stuff */
    #ifdef HAVE_NET_SOCKET_H
    #include <net/socket.h>
    #endif
    #endif
    #include <sys/stat.h>
    
    /* For poll and /dev/poll and epoll */
    #ifdef HAVE_POLL_H
    #include <poll.h>
    #endif /* HAVE_POLL_H */
    #ifdef HAVE_SYS_POLL_H
    #include <sys/poll.h>
    #endif /* HAVE_SYS_POLL_H */
    
    /* For /dev/poll */
    #ifdef HAVE_SYS_DEVPOLL_H
    #include <sys/devpoll.h>
    #endif /* HAVE_SYS_DEVPOLL_H */
    
    /* For epoll */
    #ifdef HAVE_SYS_EPOLL_H
    #include <sys/epoll.h>
    #endif /* HAVE_SYS_EPOLL_H */
    
    /* For kqueue. */
    #ifdef HAVE_SYS_EVENT_H
    #include <sys/event.h>
    #endif /* HAVE_SYS_EVENT_H */
    
    /* for kqueue + CFRunLoop */
    #ifdef HAVE_CORESERVICES_CORESERVICES_H
    #include <CoreServices/CoreServices.h>
    #endif /* HAVE_CORESERVICES_CORESERVICES_H */
    
    /* The following are used on Linux'es that have an old libc. */
    #ifdef HAVE_SYSCALL_H
    #include <syscall.h>
    #elif defined(HAVE_SYS_SYSCALL_H)
    #include <sys/syscall.h>
    #endif /* HAVE_SYSCALL_H || HAVE_SYS_SYSCALL_H */
    
    #if defined(BACKEND_USES_POLL_DEVICE) || defined(BACKEND_USES_KQUEUE)
    struct program * PollDeviceBackend_program;
    #endif /* BACKEND_USES_POLL_DEVICE || BACKEND_USES_KQUEUE */
    
    /*
     * Debugging and tracing.
     */
    
    /* #define POLL_DEBUG */
    /* #define CALL_OUT_DEBUG */
    
    #ifdef PIKE_EXTRA_DEBUG
    /* #define POLL_DEBUG */
    /* #define CALL_OUT_DEBUG */
    #endif
    
    #ifdef POLL_DEBUG
    #define IF_PD(x)	x
    #define PDUNUSED(x) x
    #else /* !POLL_DEBUG */
    #define IF_PD(x)
    #define PDUNUSED(x) UNUSED(x)
    #endif /* POLL_DEBUG */
    
    #ifdef CALL_OUT_DEBUG
    #define IF_CO(X) X
    #else
    #define IF_CO(X)
    #endif
    
    #ifdef PIKE_THREADS
    #define THR_NO (int) PTR_TO_INT (THREAD_T_TO_PTR (th_self()))
    #else
    #define THR_NO getpid()
    #endif
    
    /* Declarations for the legacy backend interface stuff. */
    
    static struct compat_cb_box * alloc_compat_cb_box();
    static void really_free_compat_cb_box(struct compat_cb_box * b);
    static int compat_box_dispatcher (struct fd_callback_box *box, int event);
    
    #ifdef BACKEND_USES_CFRUNLOOP
    /* used by CoreFoundation backend to get around lack of PDB structures early in the CMOD. */
    static void check_set_timer(struct timeval when);
    #endif
    /* CALL OUT STUFF */
    
    #ifdef PIKE_DEBUG
    #define MESS_UP_BLOCK(X) do {\
     (X)->next_arr=(struct Backend_CallOut_struct *)(ptrdiff_t)-1; \
     (X)->next_fun=(struct Backend_CallOut_struct *)(ptrdiff_t)-1; \
     (X)->prev_arr=(struct Backend_CallOut_struct **)(ptrdiff_t)-1; \
     (X)->prev_fun=(struct Backend_CallOut_struct **)(ptrdiff_t)-1; \
     (X)->pos=-1; \
     } while(0)
    #else
    #define MESS_UP_BLOCK(X)
    #endif
    
    #undef EXIT_BLOCK
    #define EXIT_BLOCK(X) do { \
      *(X->prev_arr)=X->next_arr; \
      if(X->next_arr) X->next_arr->prev_arr=X->prev_arr; \
      *(X->prev_fun)=X->next_fun; \
      if(X->next_fun) X->next_fun->prev_fun=X->prev_fun; \
      MESS_UP_BLOCK(X); \
      } while(0)
    
    /* One bucket of the call out hash table. Each call out is linked
     * into two chains: one keyed on the args array pointer and one
     * keyed on the hash of the function svalue (see LINK() in
     * CallOut::create()). */
    struct hash_ent
    {
      struct Backend_CallOut_struct *arr;  /* Chain keyed on args array pointer. */
      struct Backend_CallOut_struct *fun;  /* Chain keyed on function hash value. */
    };
    
    
    
    DECLARATIONS
    
    
    struct callback_list do_debug_callbacks;
    struct timeval current_time;
    int current_time_invalid = 1;
    
    /*
     * Stuff to map fds to the proper Backend
     */
    static struct Backend_struct **fd_map=0;
    static int fd_map_size=0;
    static struct object *default_backend_obj = NULL;
    PMOD_EXPORT struct Backend_struct *default_backend = NULL;
    #ifdef DO_PIKE_CLEANUP
    static int num_active_backends = 0;
    #endif
    
    #if defined(BACKEND_USES_CFRUNLOOP)
    static void noteEvents(CFFileDescriptorRef fdref, CFOptionFlags callBackTypes, void *info);
    void cfObserverCallback(CFRunLoopObserverRef observer,
        CFRunLoopActivity activity, void* info);
    void cfTimerCallback(CFRunLoopTimerRef timer, void * info);
    
    #endif /* BACKEND_USES_CFRUNLOOP */
    
    static int backend_do_call_outs(struct Backend_struct *me);
    #ifdef PIKE_DEBUG
    static void backend_verify_call_outs(struct Backend_struct *me);
    #endif
    #ifdef DO_PIKE_CLEANUP
    static void backend_cleanup();
    #endif
    
    /* Look up the backend registered for fd in the global fd_map.
     * Returns NULL when fd is negative or beyond the mapped range. */
    struct Backend_struct *get_backend_for_fd(int fd)
    {
      if ((fd >= 0) && (fd < fd_map_size))
        return fd_map[fd];
      return 0;
    }
    
    /* Associate fd with backend b in the global fd_map, growing the map
     * (doubling, starting at 64 entries) as needed. Passing b == NULL
     * unregisters the fd. Allocation failure is fatal, so the
     * realloc-into-same-pointer pattern below cannot leak in practice. */
    static void low_set_backend_for_fd(int fd, struct Backend_struct *b)
    {
    #ifdef PIKE_DEBUG
      if(fd<0) Pike_fatal("set_backend_for(%d)\n",fd);
    #endif
      if (!b) {
        /* Unregister the fd. */
        if (fd < fd_map_size) {
          fd_map[fd] = NULL;
        }
        return;
      }
      if(fd >= fd_map_size)
      {
        /* Grow the map; remember the old size so the new tail can be
         * zero-filled below. */
        int old=fd_map_size;
        if(!fd_map_size) fd_map_size=64;
        while(fd >= fd_map_size) fd_map_size*=2;
        if (fd_map) {
          fd_map = (struct Backend_struct **)
    	realloc(fd_map, sizeof(struct Backend_struct *) * fd_map_size);
        } else {
          fd_map = (struct Backend_struct **)
    	malloc(sizeof(struct Backend_struct *) * fd_map_size);
        }
        if(!fd_map)
          Pike_fatal("Out of memory in backend:low_set_backend_for_fd.\n"
    		 "Tried to allocate %"PRINTSIZET"d bytes.\n",
    		 sizeof(struct Backend_struct *) * fd_map_size);
        
        /* Newly added entries start out with no backend registered. */
        MEMSET(fd_map+old,0,sizeof(struct Backend_struct *) * (fd_map_size-old));
      }
      fd_map[fd]=b;
    }
    
    /* Like get_backend_for_fd(), but never returns NULL: when the fd has
     * no registered backend it is registered with (and mapped to) the
     * default backend before returning it. */
    struct Backend_struct *really_get_backend_for_fd(int fd)
    {
      struct Backend_struct *found = get_backend_for_fd(fd);

      if (found)
        return found;

    #ifdef PIKE_DEBUG
      if(!default_backend)
        Pike_fatal("No backend!\n");
    #endif
      /* Cache the association so the next lookup hits directly. */
      low_set_backend_for_fd(fd, default_backend);
      return default_backend;
    }
    
    /*! @module Pike
     */
    
    /*! @class __Backend
     *!   Base class for the various backend implementations.
     *!
     *!   Implements callback registration functions and defines the
     *!   main backend APIs.
     */
    PIKECLASS Backend
    {
      /* Provide a unique count to be able to tell backends apart with _sprintf. */
      static int unused_id = 0;
      CVAR int id;
    
      CVAR struct timeval next_timeout;
    
      /*
       * Backend callbacks
       */
      CVAR struct callback_list backend_callbacks;
    
      /*! @decl function(Backend:void) before_callback
       *! @decl function(Backend:void) after_callback
       *!
       *! If set, these are called just before and after the backend waits
       *! for an event.
       *!
       *! If an error is thrown from these callbacks then it is reported
       *! using @expr{master()->handle_error()@} - it doesn't interrupt
       *! the operation of the backend.
       */
      /* before_callback is not strictly necessary since one can just as
       * well run it before `(), but it's convenient to have a standard
       * place to hook in a function. */
      PIKEVAR function(Backend:void) before_callback;
      PIKEVAR function(Backend:void) after_callback;
    
      /* Thread currently executing in the backend. */
    #ifdef PIKE_THREADS
      CVAR struct thread_state *exec_thread;
    #else
      CVAR int exec_thread;		/* 1 if inside the backend. */
    #endif
    
      /*
       * PIPE for waking up
       */
      CVAR int wakeup_pipe_send_fd;
      CVAR struct fd_callback_box wakeup_cb_box;
      CVAR int may_need_wakeup;
    
      /*
       * FD callback data
       */
    
      /* An array indexed on fd with arbitrary upper and lower bounds. A
       * lower bound is used to cut down the size for backends that
       * handle a single or only a few fd's. */
      CVAR struct fd_callback_box **fd_boxes;
      CVAR int fd_boxes_start, fd_boxes_size;
    
      /* Callback boxes which don't correspond to any open file. */
      CVAR struct fd_callback_box **inactive_boxes, **free_inactive_box;
      CVAR int inactive_boxes_size;
    
      /*
       * Hooks to inheriting classes.
       */
    #ifdef PIKE_DEBUG
      typedef void debug_handler_fn (struct Backend_struct *me, void *data);
      CVAR debug_handler_fn *debug_handler;
    #endif /* PIKE_DEBUG */
      typedef void update_fd_set_handler_fn (struct Backend_struct *me, void *data,
    					 int fd,
    					 int old_events, int new_events, int flags);
      CVAR update_fd_set_handler_fn *update_fd_set_handler;
      CVAR void *handler_data;
    
      /*
       * CALL OUT variables
       */
      CVAR int num_pending_calls;		    /* no of busy pointers in heap */
      CVAR struct Backend_CallOut_struct **call_heap;   /* pointer to heap */
      CVAR int call_heap_size;		    /* no of pointers in heap */
      
      CVAR unsigned int hash_size;
      CVAR unsigned int hash_order;
      CVAR struct hash_ent *call_hash;
    
      /* Should really exist only in PIKE_DEBUG, but 
       * #ifdefs on the last cvar confuses precompile.pike.
       *	/grubba 2001-03-12
       * Should be fixed now -Hubbe
       */
    #ifdef PIKE_DEBUG
      CVAR int inside_call_out;
    #endif
    
      /* The object we're in. This ref isn't refcounted. */
      CVAR struct object *backend_obj;
    
    #ifdef _REENTRANT
      /* Currently only used for poll devices. */
      CVAR int set_busy;
      CVAR COND_T set_change;
    #endif
    
      DECLARE_STORAGE
    
      /* Return the Pike object wrapping backend b.
       * NB: the stored reference is not refcounted (see backend_obj). */
      PMOD_EXPORT struct object *get_backend_obj (struct Backend_struct *b)
      {
        struct object *obj = b->backend_obj;
        return obj;
      }
    
      /* Read callback for the wakeup pipe: drain the bytes written by
       * backend_wake_up_backend(), retrying reads interrupted by
       * signals. With threads, also wait for any ongoing fd-set change
       * to finish before returning to the backend loop. */
      static int wakeup_callback(struct fd_callback_box *box, int UNUSED(event))
      {
        char scratch[1024];
        int res;

        /* Clear the 'flag' bytes; EINTR just means try again. */
        do {
          res = fd_read(box->fd, scratch, sizeof(scratch));
        } while ((res < 0) && (errno == EINTR));

    #ifdef _REENTRANT
        while (box->backend->set_busy)
          co_wait_interpreter(&box->backend->set_change);
    #endif /* _REENTRANT */
        return 0;
      }
    
      /* This is used by threaded programs and signals to wake up the
       * master 'thread'.
       *
       * It's called from the signal handler so it must not lock any mutex
       * whatsoever. E.g. dmalloc stuff is verboten here.
       */
      /* This is used by threaded programs and signals to wake up the
       * master 'thread'.
       *
       * It's called from the signal handler so it must not lock any mutex
       * whatsoever. E.g. dmalloc stuff is verboten here.
       */
      /* Wakes the backend by writing one byte to its wakeup pipe; the
       * backend's wakeup_callback() drains it. Only writes when the
       * backend has signalled it may be blocking (may_need_wakeup) and
       * the pipe exists. Retries on EINTR. */
      PMOD_EXPORT void backend_wake_up_backend(struct Backend_struct *me)
      {
        char foo=0;
    
        if(me && me->may_need_wakeup && (me->wakeup_pipe_send_fd >= 0)) {
          /* Avoid fd_write with its dmalloc stuff. */
          int len;
          do {
    	len =
    #ifdef HAVE_WINSOCK_H
    	  debug_fd_write
    #else
    	  write
    #endif
    	  (me->wakeup_pipe_send_fd, &foo ,1);
          } while ((len < 0) && (errno == EINTR));
        }
      }
    
      /* Lower the timeout
       *
       * Typically used from backend callbacks.
       */
      /* Lower the backend's next timeout to *tv if *tv is not later than
       * the currently scheduled timeout. Typically used from backend
       * callbacks; never postpones an already-earlier timeout. */
      PMOD_EXPORT void backend_lower_timeout(struct Backend_struct *me,
    					 struct timeval *tv)
      {
        if (!my_timercmp(tv, <=, &me->next_timeout))
          return;
        me->next_timeout = *tv;
      }
    
      /*
       * Backend callbacks.
       */
    
      /* Register a callback on this backend's callback list. Thin
       * wrapper around add_to_callback(); returns the new callback so
       * the caller can remove it later. */
      PMOD_EXPORT struct callback *backend_debug_add_backend_callback(
        struct Backend_struct *me, callback_func call, void *arg,
        callback_func free_func)
      {
        struct callback_list *list = &me->backend_callbacks;
        return add_to_callback(list, call, arg, free_func);
      }
    
      /* Call the monitor callback cb (e.g. before_callback or
       * after_callback) with the backend object as its only argument.
       * safe_apply_svalue() routes any thrown error through
       * master()->handle_error(), so the backend keeps running. */
      void call_backend_monitor_cb (struct Backend_struct *me, struct svalue *cb)
      {
        ref_push_object (me->backend_obj);
        safe_apply_svalue (cb, 1, 1);
        pop_stack();
      }
    
      /*
       * Call outs.
       */
    
      /*! @class CallOut
       *!
       *! Represents a single call_out in the call_out list.
       *!
       *! @seealso
       *!   @[call_out()]
       */
      PIKECLASS CallOut
        program_flags PROGRAM_USES_PARENT;
        flags ID_PROTECTED|ID_PRIVATE|ID_USED;
      {
        CVAR INT32 pos;
        CVAR size_t fun_hval;
        CVAR struct timeval tv;
        CVAR struct Backend_CallOut_struct *next_fun;
        CVAR struct Backend_CallOut_struct **prev_fun;
        CVAR struct Backend_CallOut_struct *next_arr;
        CVAR struct Backend_CallOut_struct **prev_arr;
        /*! @decl static array args
         *!
         *! The array containing the function and arguments.
         */
        PIKEVAR array args
          flags ID_STATIC;
        CVAR struct object *this;
    
        DECLARE_STORAGE;
    
    #undef CAR
    #undef CDR
    
    #define CAR(X) (((X)<<1)+1)
    #define CDR(X) (((X)<<1)+2)
    #define PARENT(X) (((X)-1)>>1)
    #define CALL_(X) (me->call_heap[(X)])
    #define CALL(X) ((struct Backend_CallOut_struct *)debug_malloc_pass(CALL_(X)))
    #define MOVECALL(X,Y) do { INT32 p_=(X); (CALL_(p_)=CALL(Y))->pos=p_; }while(0)
    #define CMP(X,Y) my_timercmp(& CALL(X)->tv, <, & CALL(Y)->tv)
    #define SWAP(X,Y) do{ struct Backend_CallOut_struct *_tmp=CALL(X); (CALL_(X)=CALL(Y))->pos=(X); (CALL_(Y)=_tmp)->pos=(Y); } while(0)
    
    #ifdef PIKE_DEBUG
      /* ONERROR handler: clears the recursion guard flag if an error is
       * thrown while inside a PROTECT_CALL_OUTS() section (see below),
       * so the next entry does not trip the "recursive call" fatal. */
      static void do_unprotect_call_outs(struct Backend_struct *me)
      {
        me->inside_call_out = 0;
      }
    
    #define DECLARE_PROTECT_CALL_OUTS	ONERROR pco_err
    #define PROTECT_CALL_OUTS()					\
      do {								\
        if(me->inside_call_out)					\
          Pike_fatal("Recursive call in call_out module.\n");	\
        SET_ONERROR(pco_err, do_unprotect_call_outs, me);		\
        me->inside_call_out=1;					\
      } while(0)
     
    #define UNPROTECT_CALL_OUTS()			\
      CALL_AND_UNSET_ONERROR(pco_err)
    
    #else /* !PIKE_DEBUG */
    #define DECLARE_PROTECT_CALL_OUTS
    #define PROTECT_CALL_OUTS()
    #define UNPROTECT_CALL_OUTS()
    #endif /* PIKE_DEBUG */
    
    #ifdef PIKE_DEBUG
     
     /* Debug-only consistency check of the call out heap and hash
      * tables. The amount of checking scales with d_flag: basic size
      * sanity always, per-entry heap/array/link invariants at d_flag>=2,
      * and an O(n^2) duplicate scan at d_flag>4. Fatals on any
      * violation. */
     static void backend_verify_call_outs(struct Backend_struct *me)
       {
         struct array *v;
         int e,d;
         
         if(!d_flag) return;
         if(!me->call_heap) return;
    
         if(me->num_pending_calls<0 || me->num_pending_calls>me->call_heap_size)
           Pike_fatal("Error in call out tables.\n");
    
         if(d_flag<2) return;
    
         for(e=0;e<me->num_pending_calls;e++)
         {
           /* Heap property: no entry may be due earlier than its parent. */
           if(e)
           {
    	 if(CMP(e, PARENT(e)))
    	   Pike_fatal("Error in call out heap. (@ %d)\n",e);
           }
    
           if(!(v=CALL(e)->args))
    	 Pike_fatal("No arguments to call.\n");
    
           if(v->refs < 1)
    	 Pike_fatal("Array should have at least one reference.\n");
           
           if(v->malloced_size<v->size)
    	 Pike_fatal("Impossible array.\n");
           
           if(!v->size)
    	 Pike_fatal("Call out array of zero size!\n");
           
           /* Back-pointer invariants of the doubly linked hash chains. */
           if(CALL(e)->prev_arr[0] != CALL(e))
    	 Pike_fatal("call_out[%d]->prev_arr[0] is wrong!\n",e);
           
           if(CALL(e)->prev_fun[0] != CALL(e))
    	 Pike_fatal("call_out[%d]->prev_fun[0] is wrong!\n",e);
           
           if(CALL(e)->pos != e)
    	 Pike_fatal("Call_out->pos is not correct!\n");
    
           if(d_flag>4)
           {
    	 for(d=e+1;d<me->num_pending_calls;d++)
    	   if(CALL(e)->args == CALL(d)->args)
    	     Pike_fatal("Duplicate call out in heap.\n");
           }
         }
         
         /* Spot-check (up to 10 slots) that the free tail of the heap is
          * really empty. */
         for(d=0;d<10 && e<me->call_heap_size;d++,e++) {
           if (CALL(e)) Pike_fatal("Call out left in heap.\n");
         }
    
         /* Walk both hash chains of every bucket and verify the
          * prev-pointers and that no freed entry is still linked. */
         for(e=0;e<(int)me->hash_size;e++)
         {
           struct Backend_CallOut_struct *c,**prev;
           for(prev=& me->call_hash[e].arr;(c=*prev);prev=& c->next_arr)
           {
    	 if(c->prev_arr != prev)
    	   Pike_fatal("c->prev_arr is wrong %p.\n",c);
    
    	 if(c->pos<0)
    	   Pike_fatal("Free call_out in call_out hash table %p.\n",c);
           }
    
           for(prev=& me->call_hash[e].fun;(c=*prev);prev=& c->next_fun)
           {
    	 if(c->prev_fun != prev)
    	   Pike_fatal("c->prev_fun is wrong %p.\n",c);
    	 
    	 if(c->pos<0)
    	   Pike_fatal("Free call_out in call_out hash table %p.\n",c);
           }
         }
       }
    
    
    #else
    #define backend_verify_call_outs(X)
    #endif
    
    
     /* Sift the heap entry at pos downwards until the heap property
      * holds: swap it with its earlier-due child while it is due later
      * than that child. CMP(X,Y) is "entry X due strictly before Y". */
     static void adjust_down(struct Backend_struct *me,int pos)
       {
         while(1)
         {
           int a=CAR(pos), b=CDR(pos);
           if(a >= me->num_pending_calls) break;
           /* Pick the earlier-due of the two children. */
           if(b < me->num_pending_calls)
    	 if(CMP(b, a))
    	   a=b;
           
           if(CMP(pos, a)) break;
           SWAP(pos, a);
           pos=a;
         }
       }
     
     /* Sift the heap entry at pos upwards while it is due before its
      * parent. Returns 1 if the entry moved, 0 otherwise. After moving
      * up, the sibling of the last vacated slot ('from' adjusted by
      * +/-1) is re-checked and sifted down if needed — NOTE(review):
      * this sibling fixup suggests the heap ordering maintained here is
      * looser than a textbook binary heap; confirm against CMP()'s
      * strict-< semantics before restructuring. */
     static int adjust_up(struct Backend_struct *me,int pos)
       {
         int parent=PARENT(pos);
         int from;
    #ifdef PIKE_DEBUG
         if(pos <0 || pos>=me->num_pending_calls)
           Pike_fatal("Bad argument to adjust_up(%d)\n",pos);
    #endif
         if(!pos) return 0;
         
         if(CMP(pos, parent))
         {
           SWAP(pos, parent);
           from=pos;
           pos=parent;
           while(pos && CMP(pos, PARENT(pos)))
           {
    	 parent=PARENT(pos);
    	 SWAP(pos, parent);
    	 from=pos;
    	 pos=parent;
           }
           /* Move to the sibling of the last slot we bubbled through. */
           from+=from&1 ? 1 : -1;
           if(from < me->num_pending_calls && CMP(from, pos))
           {
    	 SWAP(from, pos);
    	 adjust_down(me,from);
           }
           return 1;
         }
         return 0;
       }
     
     /* Restore the heap invariant for the entry at pos: try moving it
      * up first; only sift it down when it did not move up. */
     static void adjust(struct Backend_struct *me,int pos)
       {
         if (adjust_up(me, pos))
           return;
         adjust_down(me, pos);
       }
     
        /* Object init hook: pos == -1 marks the call out as not (yet)
         * linked into the heap. The object pointer stored in 'this' is
         * not refcounted here; a ref is added when create() links the
         * call out into the heap. */
        INIT
        {
          THIS->pos = -1;
          THIS->this = Pike_fp->current_object;
        }
    
        /* Object exit hook: if the call out is still live in the heap
         * (pos >= 0), unlink it — move the last heap entry into its
         * slot, re-heapify, and drop the ref taken in create(). Then
         * EXIT_BLOCK unlinks it from both hash chains. */
        EXIT
        {
          struct Backend_CallOut_struct *this = THIS;
    
          if (this->pos >= 0) {
    	/* Still active in the heap. DO_PIKE_CLEANUP? */
    	struct Backend_struct *me = parent_storage(1);
    	int e = this->pos;
    
    	me->num_pending_calls--;
    	if (e != me->num_pending_calls) {
    	  /* Fill the hole with the last entry and restore heap order. */
    	  MOVECALL(e, me->num_pending_calls);
    	  adjust(me, e);
    	}
    	CALL_(me->num_pending_calls) = NULL;
    	this->pos = -1;
    	free_object(this->this);
    	this->this = NULL;
          }
          EXIT_BLOCK(this);
        }
    
        /*! @decl void create(int|float seconds, mixed fun, mixed ... args)
         *!
         *!   Start a new call out.
         *!
         *!   This is the low-level implementation of @[call_out()].
         *!
         *!   @[call_out()] is essentially implemented as:
         *!   @code
         *!     array call_out(mixed fun, int|float seconds, mixed ... args)
         *!     {
         *!       return CallOut(seconds, fun, @@args)->args;
         *!     }
         *!   @endcode
         *!
         *! @seealso
         *!   @[call_out()]
         */
        /* Low-level call out registration (see the blurb above): wraps
         * fun+extra_args into an array, links this CallOut into the
         * heap and both hash chains, and computes the absolute due time
         * from the relative 'seconds'. Grows the heap (doubling) and
         * the hash table (next hashprime) on demand. */
        PIKEFUN void create(int|float seconds, mixed fun, mixed ... extra_args)
          flags ID_PROTECTED;
        {
          struct array *callable;
          size_t fun_hval;
          size_t hval;
          struct Backend_struct *me = parent_storage(1);
          struct Backend_CallOut_struct *new = THIS;
          DECLARE_PROTECT_CALL_OUTS;
    
          /* Collapse fun + extra_args into one array on the stack. */
          push_array(callable = aggregate_array(args - 1));
          args = 2;
    
          /* NOTE: hash_svalue() can run Pike code! */
          fun_hval = hash_svalue(ITEM(callable));
    
          PROTECT_CALL_OUTS();
          if(me->num_pending_calls == me->call_heap_size)
          {
    	/* here we need to allocate space for more pointers */
    	struct Backend_CallOut_struct **new_heap;
    
    	if(!me->call_heap || !me->call_hash)
    	{
    	  /* First call out ever: set up heap and hash from scratch. */
    	  if (!me->call_heap) {
    	    me->call_heap_size = 128;
    	    me->call_heap =
    	      (struct Backend_CallOut_struct **)
    	      xalloc(sizeof(struct Backend_CallOut_struct *) *
    		     me->call_heap_size);
    	    MEMSET(me->call_heap, 0, sizeof(struct Backend_CallOut_struct *) *
    		   me->call_heap_size);
    	    me->num_pending_calls = 0;
    	  }
    
    	  if (!me->call_hash) {
    	    me->hash_size = hashprimes[me->hash_order];
    	    me->call_hash =
    	      (struct hash_ent *)xalloc(sizeof(struct hash_ent)*me->hash_size);
    	    MEMSET(me->call_hash, 0, sizeof(struct hash_ent)*me->hash_size);
    	  }
    	}else{
    	  /* Heap full: double it, and try to grow the hash table to
    	   * the next prime size (hash growth is best-effort — on
    	   * malloc failure the old table is kept). */
    	  struct hash_ent *new_hash;
    	  int e;
    
    	  new_heap = (struct Backend_CallOut_struct **)
    	    realloc((char *)me->call_heap,
    		    sizeof(struct Backend_CallOut_struct *)*me->call_heap_size*2);
    	  if(!new_heap)
    	    Pike_error("Not enough memory for another call_out\n");
    	  MEMSET(new_heap + me->call_heap_size, 0,
    		 sizeof(struct Backend_CallOut_struct *)*me->call_heap_size);
    	  me->call_heap_size *= 2;
    	  me->call_heap = new_heap;
    
    	  if((new_hash=(struct hash_ent *)malloc(sizeof(struct hash_ent)*
    						 hashprimes[me->hash_order+1])))
    	  {
    	    free((char *)me->call_hash);
    	    me->call_hash = new_hash;
    	    me->hash_size = hashprimes[++me->hash_order];
    	    MEMSET(me->call_hash, 0, sizeof(struct hash_ent)*me->hash_size);
    
    	    /* Re-hash */
    	    for(e=0;e<me->num_pending_calls;e++)
    	    {
    	      struct Backend_CallOut_struct *c = CALL(e);
    	      hval = PTR_TO_INT(c->args);
    
    /* Link c first into the chain X (arr or fun) of the bucket selected
     * by hval. NB: this macro is reused below, after the re-hash loop,
     * to link the new call out itself. */
    #define LINK(X,c)							\
    	      hval %= me->hash_size;					\
    	      if((c->PIKE_CONCAT(next_,X) = me->call_hash[hval].X))	\
    		c->PIKE_CONCAT(next_,X)->PIKE_CONCAT(prev_,X) =		\
    		  &c->PIKE_CONCAT(next_,X);				\
    	      c->PIKE_CONCAT(prev_,X) = &me->call_hash[hval].X;		\
    	      me->call_hash[hval].X = c
    
    	      LINK(arr,c);
    	      hval = c->fun_hval;
    	      LINK(fun,c);
    	    }
    	  }
    	}
          }
    
    #ifdef PIKE_DEBUG
          if (CALL(me->num_pending_calls)) {
    	Pike_fatal("Lost call out in heap.\n");
          }
    #endif /* PIKE_DEBUG */
    
          /* Append at the end of the heap; adjust_up() below restores
           * heap order. The ref added here is dropped on unlink/EXIT. */
          CALL_(me->num_pending_calls) = new;
          new->pos = me->num_pending_calls++;
          add_ref(Pike_fp->current_object);
    
          {
    	hval = PTR_TO_INT(callable);
    	LINK(arr,new);
    	hval = new->fun_hval = fun_hval;
    	LINK(fun,new);
          }
    
          /* Convert the relative timeout to a timeval. */
          switch(TYPEOF(*seconds))
          {
          case T_INT:
    	new->tv.tv_sec = seconds->u.integer;
    	new->tv.tv_usec = 0;
    	break;
    
          case T_FLOAT:
    	{
    	  FLOAT_TYPE tmp = seconds->u.float_number;
    	  new->tv.tv_sec = DO_NOT_WARN((long)floor(tmp));
    	  new->tv.tv_usec = DO_NOT_WARN((long)(1000000.0 * (tmp - floor(tmp))));
    	  break;
    	}
    
          default:
    	Pike_fatal("Bad timeout to new_call_out!\n");
          }
    
    #ifdef BACKEND_USES_CFRUNLOOP
          check_set_timer(new->tv);
    #endif
          /* Make the due time absolute: with multiple threads use an
           * accurate gettimeofday, otherwise the cached current time. */
    #ifdef _REENTRANT
          if(num_threads>1)
          {
    	struct timeval tmp;
    	ACCURATE_GETTIMEOFDAY(&tmp);
    	my_add_timeval(& new->tv, &tmp);
    	IF_CO (fprintf (stderr, "BACKEND[%d]: Adding call out at %ld.%ld "
    			"(current time is %ld.%ld)\n", me->id,
    			new->tv.tv_sec, new->tv.tv_usec,
    			tmp.tv_sec, tmp.tv_usec));
          } else
    #endif
          {
    	struct timeval tmp;
    	INACCURATE_GETTIMEOFDAY(&tmp);
    	my_add_timeval(& new->tv, &tmp);
    	IF_CO (fprintf (stderr, "BACKEND[%d]: Adding call out at %ld.%ld "
    			"(current_time is %ld.%ld)\n", me->id,
    			new->tv.tv_sec, new->tv.tv_usec,
    			tmp.tv_sec, tmp.tv_usec));
          }
    
          /* Hand the (already-pushed) args array over to the call out and
           * drop the two svalues this function still has on the stack. */
          new->args = callable;
          Pike_sp -= 2;
          dmalloc_touch_svalue(Pike_sp);
    
          adjust_up(me, me->num_pending_calls-1);
          backend_verify_call_outs(me);
    
    #ifdef _REENTRANT
          /* The backend may be blocking with an older timeout. */
          backend_wake_up_backend(me);
    #endif
         
          UNPROTECT_CALL_OUTS();
        }
      }
      /*! @endclass
       */
    
    #undef THIS
    #define THIS THIS_BACKEND
    
      /* Push two key/value pairs ("num_call_outs" and "call_out_bytes")
       * on the Pike stack for consumption by f_aggregate_mapping() (see
       * get_stats()). The push order IS the mapping layout — keep it. */
      static void backend_count_memory_in_call_outs(struct Backend_struct *me)
      {
        push_text("num_call_outs");
        push_int(me->num_pending_calls);
    
        push_text("call_out_bytes");     
        /* Heap slots plus the storage of the live call out structs. */
        push_int64(me->call_heap_size * sizeof(struct Backend_CallOut_struct **)+
    	       me->num_pending_calls * sizeof(struct Backend_CallOut_struct));
    
      }
    
      /* Memory-usage callback adapter: reports the call out statistics
       * of the default backend (callback arguments are unused). */
      static void count_memory_in_call_outs(struct callback *UNUSED(foo),
    					void *UNUSED(bar),
    					void *UNUSED(gazonk))
      {
        backend_count_memory_in_call_outs(default_backend);
      }
    
      /*! @decl mapping(string:int) get_stats()
       *!
       *! Get some statistics about the backend.
       *!
       *! @returns
       *!   Returns a mapping with the follwoing content:
       *!   @mapping
       *!     @member int "num_call_outs"
       *!       The number of active call-outs.
       *!     @member int "call_out_bytes"
       *!       The amount of memory used by the call-outs.
       *!   @endmapping
       */
      PIKEFUN mapping(string:int) get_stats()
      {
        /* Remember the stack level so we know how many key/value svalues
         * the helper pushed. */
        struct svalue *save_sp = Pike_sp;
        backend_count_memory_in_call_outs(THIS);
        /* Fold the pushed pairs into a mapping; 'args' is the PIKEFUN
         * argument count, popped below so only the mapping remains. */
        f_aggregate_mapping(Pike_sp - save_sp);
        stack_pop_n_elems_keep_top(args);
      }
    
       /* FIXME */
    #if 0
       MARK 
         {
           int e;
           struct Backend_struct *me=THIS;
           
           for(e=0;e<me->num_pending_calls;e++)
           {
    	 gc_mark(CALL(e)->args,0,"call out args");
           }
         }
    #endif
    
    /*! @decl array call_out(function f, float|int delay, mixed ... args)
     *!
     *! Make a delayed call to a function.
     *!
     *! @[call_out()] places a call to the function @[f] with the argument
     *! @[args] in a queue to be called in about @[delay] seconds.
     *!
     *! If @[f] returns @expr{-1@}, no other call out or callback will be
     *! called by the backend in this round. I.e. @[`()] will return right
     *! away. For the main backend that means it will immediately start
     *! another round and check files and call outs anew.
     *!
     *! @returns
     *!   Returns a call_out identifier that identifies this call_out.
     *!   This value can be sent to eg @[find_call_out()] or @[remove_call_out()].
     *!
     *! @seealso
     *!   @[remove_call_out()], @[find_call_out()], @[call_out_info()],
     *!   @[CallOut]
     */
       /* Pike-level call_out(fun, t, ...): swaps the first two arguments
        * (CallOut::create() takes seconds first, call_out() historically
        * takes the function first), instantiates a CallOut, and returns
        * its args array as the call out identifier. */
       PIKEFUN array call_out(mixed f, int|float t, mixed ... rest)
         {
           struct svalue tmp;
           struct object *co;
           struct Backend_CallOut_struct *c;
    
           if(args<2)
    	 SIMPLE_TOO_FEW_ARGS_ERROR("call_out", 2);
    
           if(TYPEOF(*t) != T_INT && TYPEOF(*t) != T_FLOAT)
    	 SIMPLE_BAD_ARG_ERROR("call_out", 2, "int|float");
    
           /* Swap, for compatibility */
           tmp = Pike_sp[-args];
           Pike_sp[-args] = Pike_sp[1-args];
           Pike_sp[1-args] = tmp;
    
           /* Consumes all args, leaves the new CallOut object on top. */
           apply_current(Backend_CallOut_program_fun_num, args);
           args = 1;
    
           get_all_args("low_call_out", args, "%o", &co);
    
           c = (struct Backend_CallOut_struct *)
    	 get_storage(co, Backend_CallOut_program);
    
           if (!c) Pike_error("low_call_out(): Unexpected object from CallOut.\n");
    
           /* The args array doubles as the public call out id. */
           ref_push_array(c->args);
    
           stack_pop_n_elems_keep_top(args);
         }
    
       /* Assumes current_time is correct on entry. */
       /* Assumes current_time is correct on entry. */
       /* Run all call outs that are due, for at most ~1 second of wall
        * time (limit 'tmp'). Returns the number of call outs executed;
        * the count is negated when a call out returned -1 to request an
        * immediate new backend round. */
       static int backend_do_call_outs(struct Backend_struct *me)
         {
           int call_count = 0;
           int args;
           struct timeval tmp, now;
           backend_verify_call_outs(me);
    
           INACCURATE_GETTIMEOFDAY(&now);
           tmp.tv_sec = now.tv_sec;
           tmp.tv_usec = now.tv_usec;
           tmp.tv_sec++;
           /* NB: the loop condition uses the OUTER 'now' (set once
            * above), so only call outs due at loop entry are run. */
           while(me->num_pending_calls &&
    	     my_timercmp(&CALL(0)->tv, <= ,&now))
           {
    	 /* NOTE(review): this inner 'now' shadows the outer one; it
    	  * is used only for the time-limit check at the bottom of the
    	  * loop. Presumably deliberate — verify before touching. */
    	 struct timeval now;
    	 /* unlink call out */
    	 struct Backend_CallOut_struct *cc;
    	 DECLARE_PROTECT_CALL_OUTS;
    	 
    	 /* Pop the heap root (the earliest-due call out). */
    	 PROTECT_CALL_OUTS();
    	 cc=CALL(0);
    	 if(--me->num_pending_calls)
    	 {
    	   MOVECALL(0,me->num_pending_calls);
    	   adjust_down(me, 0);
    	 }
    	 CALL_(me->num_pending_calls) = NULL;
    	 UNPROTECT_CALL_OUTS();
    	 cc->pos = -1;
    
    	 /* Push the function + arguments and release the CallOut's
    	  * ownership of them. */
    	 args = cc->args->size;
    	 push_array_items(cc->args);
    	 cc->args = NULL;
    	 free_object(cc->this);
    
    	 /* A destructed function becomes integer 0 — skip it. */
    	 check_destructed(Pike_sp - args);
    	 if(TYPEOF(Pike_sp[-args]) != T_INT)
    	 {
    	   IF_CO(
    	     fprintf(stderr, "[%d]BACKEND[%d]: backend_do_call_outs: "
    		     "calling call out ", THR_NO, me->id);
    	     print_svalue (stderr, Pike_sp - args);
    	     fputc ('\n', stderr);
    	   );
    	   call_count++;
    	   f_call_function(args);
    	   /* Return value -1 => abort this round immediately. */
    	   if (TYPEOF(Pike_sp[-1]) == T_INT && Pike_sp[-1].u.integer == -1) {
    	     pop_stack();
    	     backend_verify_call_outs(me);
    	     call_count = -call_count;
    	     break;
    	   }
    	   else
    	     pop_stack();
    	 }else{
    	   IF_CO(fprintf(stderr, "[%d]BACKEND[%d]: backend_do_call_outs: "
    			 "ignoring destructed call out\n", THR_NO, me->id));
    	   pop_n_elems(args);
    	 }
    	 backend_verify_call_outs(me);
    	 
    	 /* Stop if we have spent more than the 1 second budget. */
    	 ACCURATE_GETTIMEOFDAY(&now);
    	 if(my_timercmp(&now, > , &tmp)) break;
           }
    
           IF_CO (
    	 if (me->num_pending_calls)
    	   fprintf (stderr, "BACKEND[%d]: backend_do_call_outs: stopping with %d "
    		    "call outs left, closest with time %ld.%ld "
    		    "(current_time %ld.%ld, limit at %ld.%ld)\n",
    		    me->id, me->num_pending_calls,
    		    CALL(0)->tv.tv_sec, CALL(0)->tv.tv_usec,
    		    now.tv_sec, now.tv_usec,
    		    tmp.tv_sec, tmp.tv_usec);
    	 else
    	   fprintf (stderr, "BACKEND[%d]: backend_do_call_outs: "
    		    "no outstanding call outs\n",
    		    me->id);
           );
    
           return call_count;
         }
    
       /* NB: Calls Pike code, so MUST NOT be used in a PROTECT_CALL_OUTS()
        *     context.
        *
        *     Note that a non-NULL return value only indicates that the
        *     fun was found before the calls of is_eq(), which may have
        *     caused the call_out to be removed.
        */
   /* Look up the call out matching fun.
    *
    * fun is either a call out id (the args array as returned by
    * call_out()) which is looked up directly, or any other value which
    * is compared with is_eq() against the stored function of each
    * call out hashing to the same bucket.
    *
    * Returns the matching call out's args array with one extra reference
    * added, or NULL when no match was found.  See the NB above: is_eq()
    * may run Pike code, so the call out may already have been removed by
    * the time the caller looks at the result.
    */
   static struct array *backend_find_call_out_info(struct Backend_struct *me,
						   struct svalue *fun)
     {
       size_t hval, fun_hval;
       struct Backend_CallOut_struct *c;
       struct svalue *save_sp = Pike_sp;
       DECLARE_PROTECT_CALL_OUTS;

       if(!me->num_pending_calls) return NULL;

       PROTECT_CALL_OUTS();

       /* Call out id: direct lookup in the args-array hash chain. */
       if(TYPEOF(*fun) == T_ARRAY)
       {
	 hval=PTR_TO_INT(fun->u.array);
	 hval%=me->hash_size;
	 for(c=me->call_hash[hval].arr;c;c=c->next_arr)
	 {
	   if(c->args == fun->u.array)
	   {
#ifdef PIKE_DEBUG
	     if(CALL(c->pos) != c)
	       Pike_fatal("Call_out->pos not correct!\n");
#endif
	     UNPROTECT_CALL_OUTS();
	     add_ref(c->args);
	     return c->args;
	   }
	 }
       }

       /* Function value: first collect all candidates with the same hash
	* on the Pike stack (each with a ref), then compare below. */
       fun_hval=hash_svalue(fun);
       hval = fun_hval % me->hash_size;
       for(c=me->call_hash[hval].fun;c;c=c->next_fun)
       {
	 if(c->fun_hval == fun_hval)
	 {
#ifdef PIKE_DEBUG
	   if(CALL(c->pos) != c)
	     Pike_fatal("Call_out->pos not correct!\n");
#endif
	   /* Delay the is_eq() call until we've finished
	    * scanning the hash table.
	    */
	   ref_push_array(c->args);
	 }
       }
       UNPROTECT_CALL_OUTS();

       /* Note: is_eq() may call Pike code (which we want),
	*       however, we can't let it modify the hash
	*       table while we're scanning it.
	*/
       while (Pike_sp > save_sp) {
	 struct array *res = Pike_sp[-1].u.array;
	 /* FIXME: Use CYCLIC! */
	 if (is_eq(fun, ITEM(res))) {
	   add_ref(res);
	   pop_n_elems(Pike_sp - save_sp);
	   return res;
	 }
	 pop_stack();
       }

       return NULL;
     }
    
       /* Typically used in a PROTECT_CALL_OUTS() context. */
       static int backend_find_call_out(struct Backend_struct *me,
    				    struct array *co_info)
         {
           size_t hval, fun_hval;
           struct Backend_CallOut_struct *c;
    
           if(!co_info || !me->num_pending_calls) return -1;
    
           hval=PTR_TO_INT(co_info);
           hval%=me->hash_size;
           for(c=me->call_hash[hval].arr;c;c=c->next_arr)
           {
    	 if(c->args == co_info)
    	 {
    #ifdef PIKE_DEBUG
    	   if(CALL(c->pos) != c)
    	     Pike_fatal("Call_out->pos not correct!\n");
    #endif
    	   return c->pos;
    	 }
           }
    
           return -1;
         }
    
    /*! @decl int _do_call_outs()
     *!
     *! Do all pending call_outs.
     *!
     *! This function runs all pending call_outs that should have been
     *! run if Pike returned to the backend.  It should not be used in
     *! normal operation.
     *!
     *! As a side-effect, this function sets the value returned by
     *! @[time(1)] to the current time.
     *!
     *! @returns
     *! Zero if no call outs were called, nonzero otherwise.
     *!
     *! @seealso
     *! @[call_out()], @[find_call_out()], @[remove_call_out()]
     */
   PIKEFUN int _do_call_outs()
     {
       /* Refresh the cached time and run all call outs that are already
	* due; see backend_do_call_outs() for the return value. */
       INVALIDATE_CURRENT_TIME();
       RETURN backend_do_call_outs(THIS);
     }
    
    /*! @decl int find_call_out(function f)
     *! @decl int find_call_out(array id)
     *!
     *! Find a call out in the queue.
     *!
     *! This function searches the call out queue. If given a function as
     *! argument, it looks for the first call out scheduled to that function.
     *!
     *! The argument can also be a call out id as returned by @[call_out()], in
     *! which case that call_out will be found (Unless it has already been
     *! called).
     *!
     *! @returns
     *!   @[find_call_out()] returns the remaining time in seconds before that
     *!   call_out will be executed. If no call_out is found,
     *!   @[zero_type](@[find_call_out](f)) will return 1.
     *!
     *! @seealso
     *!   @[call_out()], @[remove_call_out()], @[call_out_info()]
     */
  PIKEFUN int find_call_out(function|mixed f)
  {
    struct Backend_struct *me=THIS;
    struct array *co_info;

    backend_verify_call_outs(me);

    /* May run Pike code via is_eq(), so it must happen outside of
     * PROTECT_CALL_OUTS(). */
    co_info = backend_find_call_out_info(me, f);

    if(!co_info)
    {
      /* NB: This is a very exotic value! */
      SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
      Pike_sp++;
    } else {
      int e;
      struct timeval now;
      DECLARE_PROTECT_CALL_OUTS;
      PROTECT_CALL_OUTS();
      /* Re-find the call out; the Pike code run above may already have
       * removed it. */
      e = backend_find_call_out(me, co_info);
      pop_n_elems(args);
      free_array(co_info);
      if (e == -1) {
	 /* NB: This is a very exotic value! */
	 SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
	 Pike_sp++;
      }else{
	 INACCURATE_GETTIMEOFDAY(&now);
	 /* Result: whole seconds left until the call out triggers. */
	 push_int(CALL(e)->tv.tv_sec - now.tv_sec);
      }
      UNPROTECT_CALL_OUTS();
    }
    backend_verify_call_outs(me);
  }
    
    /*! @decl int remove_call_out(function f)
     *! @decl int remove_call_out(array id)
     *!
     *! Remove a call out from the call out queue.
     *!
     *! This function finds the first call to the function @[f] in the call_out
     *! queue and removes it.  You can also give a call out id as argument (as
     *! returned by @[call_out()]).
     *!
     *! @returns
     *!   The remaining time in seconds left to that call out will be returned.
     *!   If no call_out was found, @[zero_type](@[remove_call_out](@[f]))
     *!   will return 1.
     *!
     *! @seealso
     *!   @[call_out_info()], @[call_out()], @[find_call_out()]
     */
  PIKEFUN int remove_call_out(function|mixed f)
  {
    struct Backend_struct *me=THIS;
    struct array *co_info;

    backend_verify_call_outs(me);

    /* May run Pike code via is_eq(); done outside PROTECT_CALL_OUTS(). */
    co_info = backend_find_call_out_info(me, f);

    if(!co_info)
    {
      /* NB: This is a very exotic value! */
      SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
      Pike_sp++;
    } else {
      int e;
      DECLARE_PROTECT_CALL_OUTS;

      PROTECT_CALL_OUTS();
      backend_verify_call_outs(me);
      /* Re-find the call out; it may already be gone. */
      e = backend_find_call_out(me, co_info);
      backend_verify_call_outs(me);
      if(e!=-1)
      {
	 struct Backend_CallOut_struct *c = CALL(e);
	 struct timeval now;

	 INACCURATE_GETTIMEOFDAY(&now);
	 IF_CO (fprintf (stderr, "BACKEND[%d]: Removing call out at %ld.%ld "
			 "(current_time is %ld.%ld)\n", me->id,
			 c->tv.tv_sec, c->tv.tv_usec,
			 now.tv_sec, now.tv_usec));
	 pop_n_elems(args);
	 /* Result: whole seconds that remained until the call out. */
	 push_int(c->tv.tv_sec - now.tv_sec);

	 /* Unlink from the heap and restore heap order. */
	 me->num_pending_calls--;
	 if(e!=me->num_pending_calls)
	 {
	   MOVECALL(e,me->num_pending_calls);
	   adjust(me,e);
	 }
	 CALL_(me->num_pending_calls) = NULL;
	 c->pos = -1;

	 free_object(c->this);
      }else{
	 pop_n_elems(args);
	 /* NB: This is a very exotic value! */
	 SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
	 Pike_sp++;
      }
      free_array(co_info);
      backend_verify_call_outs(me);
      UNPROTECT_CALL_OUTS();
    }
  }
    
/* Return an array containing info about all call outs:
 * ({  ({ delay, caller, function, args, ... }), ... })
 *
 * The returned array gets one fresh reference which the caller owns.
 */
   struct array *backend_get_all_call_outs(struct Backend_struct *me)
     {
       int e;
       struct array *ret;
       struct timeval now;
       ONERROR err;
       DECLARE_PROTECT_CALL_OUTS;

       backend_verify_call_outs(me);
       PROTECT_CALL_OUTS();
       /* Allocate with size 0 but room for all entries; ret->size is
	* grown as entries are filled in, so a partially built array
	* stays consistent if an error is thrown. */
       ret=allocate_array_no_init(0, me->num_pending_calls);
       SET_ONERROR(err, do_free_array, ret);
       ret->type_field = BIT_ARRAY;
       if(me->num_pending_calls) INACCURATE_GETTIMEOFDAY(&now);
       for(e=0;e<me->num_pending_calls;e++)
       {
	 struct array *v;
	 v=allocate_array_no_init(CALL(e)->args->size+2, 0);
	 /* NOTE(review): only u.integer of element 0 is assigned and
	  * element 1 is never assigned; this relies on the allocator
	  * handing back zeroed integer svalues — verify against
	  * array.c. */
	 ITEM(v)[0].u.integer=CALL(e)->tv.tv_sec - now.tv_sec;

	 /* FIXME: ITEM(v)[1] used to be the current object
	  *        from when the call_out was created, but
	  *        that is always the backend since the
	  *        backend.cmod rewrite.
	  *        Now we just leave it zero.
	  */
	 v->type_field = BIT_INT;

	 v->type_field |=
	   assign_svalues_no_free(ITEM(v)+2,
				  ITEM(CALL(e)->args),
				  CALL(e)->args->size,BIT_MIXED);
	 
	 SET_SVAL(ITEM(ret)[e], T_ARRAY, 0, array, v);
	 ret->size++;
       }
       UNSET_ONERROR(err);
       UNPROTECT_CALL_OUTS();
       return ret;
     }
    
    /*! @decl array(array) call_out_info()
     *!
     *! Get info about all call_outs.
     *!
     *! This function returns an array with one entry for each entry in the
     *! call out queue. The first in the queue will be at index 0. Each index
     *! contains an array that looks like this:
     *! @array
     *!   @elem int time_left
     *!     Time remaining in seconds until the call_out is to be performed.
     *!   @elem int(0..0) zero
     *!     Used to be the object that scheduled the call_out.
     *!   @elem function fun
     *!     Function to be called.
     *!   @elem mixed ... args
     *!     Arguments to the function.
     *! @endarray
     *!
     *! @seealso
     *!   @[call_out()], @[find_call_out()], @[remove_call_out()]
     */
   PIKEFUN array(array) call_out_info()
     {
       /* See backend_get_all_call_outs() for the layout of the result. */
       RETURN backend_get_all_call_outs(THIS);
     }
    
      /*
       * FD box handling
       */
    
    #define GET_ACTIVE_BOX(ME, FD)						\
       ((ME)->fd_boxes[(FD) - (ME)->fd_boxes_start])
    
    #define GET_BOX(ME, FD)							\
       ((FD) < 0 ?								\
        (ME)->inactive_boxes[~(FD)] :					\
        (ME)->fd_boxes[(FD) - (ME)->fd_boxes_start])
    
    #define SAFE_GET_ACTIVE_BOX(ME, FD)					\
       ((FD) >= (ME)->fd_boxes_start &&					\
        (FD) < (ME)->fd_boxes_start + (ME)->fd_boxes_size ?			\
        GET_ACTIVE_BOX (ME, FD) : NULL)
    
    
       static struct fd_callback_box *safe_get_box (struct Backend_struct *me, int fd)
       {
         if (fd < 0) {
           fd = ~fd;
           if (fd < me->inactive_boxes_size) {
    	 struct fd_callback_box *box = me->inactive_boxes[fd];
    	 /* Avoid free list pointers. */
    	 if ((struct fd_callback_box **) box < me->inactive_boxes ||
    	     (struct fd_callback_box **) box >= me->inactive_boxes + me->inactive_boxes_size)
    	   return box;
           }
         }
         else {
           fd -= me->fd_boxes_start;
           if (fd >= 0 && fd < me->fd_boxes_size)
    	 return me->fd_boxes[fd];
         }
         return NULL;
       }
    
   /* Exported accessor: return the callback box hooked in for fd in
    * backend me (active box when fd >= 0, inactive box for slot ~fd when
    * fd < 0), or NULL when there is none. */
   PMOD_EXPORT struct fd_callback_box *get_fd_callback_box_for_fd( struct Backend_struct *me, int fd )
   {
       return safe_get_box( me, fd );
   }
    
    /* NOTE: Some versions of AIX seem to have a
     *         #define events reqevents
     *       in one of the poll headerfiles. This will break
     *       the fd_box event handling.
     */
    #undef events
    
       /* Make sure the WANT_EVENT() macro is useable... */
    #undef READ
    #undef WRITE
    #undef READ_OOB
    #undef WRITE_OOB
    #undef FS_EVENT
    #undef ERROR
    
    #define WANT_EVENT(BOX, WHAT)						\
       ((BOX) && (BOX)->events & PIKE_CONCAT (PIKE_BIT_FD_, WHAT))
    
    #define FOR_EACH_ACTIVE_FD_BOX(ME, BOX_VAR)				\
       struct Backend_struct *me_ = (ME);					\
       struct fd_callback_box *BOX_VAR, **boxes_ = me_->fd_boxes;		\
       int b_, max_ = me_->fd_boxes_size;					\
       for (b_ = 0; b_ < max_; b_++)					\
         if ((BOX_VAR = boxes_[b_]))
    
    #define FOR_EACH_INACTIVE_FD_BOX(ME, BOX_VAR)				\
       struct Backend_struct *me_ = (ME);					\
       struct fd_callback_box *BOX_VAR, **boxes_ = me_->inactive_boxes;	\
       int b_ = 0, max_ = me_->inactive_boxes_size;				\
       for (b_ = 0; b_ < max_; b_++)					\
         if ((BOX_VAR = boxes_[b_]) &&					\
    	 /* Avoid free list pointers. */				\
    	 ((struct fd_callback_box **) BOX_VAR < boxes_ ||		\
    	  (struct fd_callback_box **) BOX_VAR >= boxes_+max_))
    
    #ifdef PIKE_DEBUG
      static void check_box (struct fd_callback_box *box, int fd)
      {
        struct Backend_struct *me;
        if (!box) return;
        if (!(me = box->backend))
          Pike_fatal ("fd_callback_box not hooked to any backend.\n");
        if (fd == INT_MAX)
          fd = box->fd;
        else if (fd != box->fd)
          Pike_fatal ("fd in callback box doesn't correspond to where it's found.\n");
        if (safe_get_box (me, fd) != box)
          Pike_fatal ("fd_callback_box not hooked in correctly for fd %d.\n", box->fd);
      }
    #else
    #  define check_box(box, fd) do {} while (0)
    #endif
    
  /* Hook box into its backend's tables.  Active boxes (box->fd >= 0) go
   * into the fd_boxes window indexed by fd; inactive boxes (fd < 0) get
   * a slot in inactive_boxes, with box->fd rewritten to ~slot.
   *
   * Tables are grown on demand; allocation failure is fatal.  The
   * caller is responsible for registering the fd in the backend's fd
   * set (update_fd_set) afterwards.
   */
  static void add_fd_box (struct fd_callback_box *box)
  {
    struct Backend_struct *me = box->backend;
    int fd = box->fd;

#ifdef PIKE_DEBUG
    if (fd >= 0) {
      struct fd_callback_box *old_box = SAFE_GET_ACTIVE_BOX (me, fd);
      if (old_box == box)
	Pike_fatal ("The box is already hooked in.\n");
      if (old_box)
	Pike_fatal ("There's another callback box %p for fd %d.\n",
		    old_box, fd);
      if (get_backend_for_fd (fd) && get_backend_for_fd (fd) != me)
	Pike_fatal ("The fd is allocated to another backend.\n");
    }
    else {
      int i;
      for (i = 0; i < me->inactive_boxes_size; i++)
	if (me->inactive_boxes[i] == box)
	  Pike_fatal ("The box is already hooked in.\n");
    }
#endif

    if (fd >= 0) {
      low_set_backend_for_fd (fd, me);

      if (!me->fd_boxes_size) {
	/* Start small since backends with only a single fd aren't uncommon. */
	me->fd_boxes_size = 4;
	me->fd_boxes = calloc (sizeof (me->fd_boxes[0]), me->fd_boxes_size);
	if (!me->fd_boxes)
	  Pike_fatal ("Out of memory in backend::add_fd_box(): "
		      "Tried to allocate %d fd_callback_box pointers\n",
		      me->fd_boxes_size);
	me->fd_boxes_start = fd;
	fd = 0;
      }

      else if (fd < me->fd_boxes_start) {
	/* fd below the current window: grow the table downwards by a
	 * doubling shift until fd fits, then move the old entries up. */
	int old_size = me->fd_boxes_size, shift = me->fd_boxes_size;
	struct fd_callback_box **old_boxes = me->fd_boxes;
	IF_PD(fprintf(stderr, "me->fd_boxes: %p (%d ==> %d)\n",
		      me->fd_boxes, me->fd_boxes_start, fd));
	while (fd < me->fd_boxes_start - shift) shift *= 2;
	if (me->fd_boxes_start - shift < 0) shift = me->fd_boxes_start;
	me->fd_boxes_start -= shift;
	me->fd_boxes_size += shift;
	debug_malloc_touch(me->fd_boxes);
	me->fd_boxes =
	  realloc (me->fd_boxes, sizeof (me->fd_boxes[0]) * me->fd_boxes_size);
	if (!me->fd_boxes)
	  Pike_fatal ("Out of memory in backend::add_fd_box(): "
		      "Tried to allocate %d fd_callback_box pointers\n",
		      me->fd_boxes_size);
	MEMMOVE (me->fd_boxes + shift, me->fd_boxes,
		 sizeof (me->fd_boxes[0]) * old_size);
	MEMSET (me->fd_boxes, 0, sizeof (me->fd_boxes[0]) * shift);
	debug_malloc_touch(me->fd_boxes);
	fd -= me->fd_boxes_start;
      }

      else {
	/* fd at or above the window start: grow upwards if needed. */
	fd -= me->fd_boxes_start;
	if (fd >= me->fd_boxes_size) {
	  int old_size=me->fd_boxes_size;
	  while(fd >= me->fd_boxes_size) me->fd_boxes_size*=2;
	  debug_malloc_touch(me->fd_boxes);
	  me->fd_boxes =
	    realloc(me->fd_boxes, sizeof(me->fd_boxes[0]) * me->fd_boxes_size);
	  if( !me->fd_boxes )
	    Pike_fatal("Out of memory in backend::add_fd_box(): "
		       "Tried to allocate %d fd_callback_box pointers\n",
		       me->fd_boxes_size);
	  MEMSET(me->fd_boxes+old_size,
		 0,
		 (me->fd_boxes_size-old_size)*sizeof(me->fd_boxes[0]));
	  debug_malloc_touch(me->fd_boxes);
	}
      }

      me->fd_boxes[fd] = box;
    }

    else {			/* Add an inactive box. */
      int pos;

      /* Free slots form a linked list threaded through the pointer
       * array itself: a free entry holds a pointer to the next free
       * entry (or NULL at the end). */
      if (!me->free_inactive_box) {
	pos = me->inactive_boxes_size;
	if (!me->inactive_boxes_size)
	  me->inactive_boxes_size = 4;
	else
	  me->inactive_boxes_size *= 2;
	if (!me->inactive_boxes)
	  me->inactive_boxes =
	    malloc (sizeof (me->inactive_boxes[0]) * me->inactive_boxes_size);
	else {
#ifdef PIKE_DEBUG
	  {
	    struct fd_callback_box **p, **boxes = me->inactive_boxes;
	    int i;
	    int max = pos; /* me->inactive_boxes_size before enlargement. */
	    for (i = 0; i < max; i++)
	      if ((p = (struct fd_callback_box **) boxes[i]) &&
		  p >= boxes && p < boxes + max)
		Pike_fatal ("Still got free list pointers in inactive box "
			    "list that is about to be enlarged.\n");
	  }
#endif
	  me->inactive_boxes =
	    realloc (me->inactive_boxes,
		     sizeof (me->inactive_boxes[0]) * me->inactive_boxes_size);
	}
	if (!me->inactive_boxes)
	  Pike_fatal ("Out of memory in backend::add_fd_box(): "
		      "Tried to allocate %d inactive fd_callback_box pointers\n",
		      me->inactive_boxes_size);
	/* Thread the newly added slots onto the free list. */
	me->free_inactive_box = me->inactive_boxes + pos;
	while (++pos < me->inactive_boxes_size)
	  me->inactive_boxes[pos - 1] =
	    (struct fd_callback_box *) (me->inactive_boxes + pos);
	me->inactive_boxes[pos - 1] = NULL;
      }

      /* Take the first free slot off the free list. */
      pos = me->free_inactive_box - me->inactive_boxes;
      me->free_inactive_box = (struct fd_callback_box **) *me->free_inactive_box;

      me->inactive_boxes[pos] = box;
      box->fd = ~pos;	/* Inactive boxes are identified by ~slot. */
    }
  }
    
  /* Unhook box from its backend's tables (inverse of add_fd_box).
   * Active fds are released from the fd -> backend mapping; inactive
   * slots are pushed back onto the free list.  Does not clear
   * box->backend and does not touch the fd set. */
  static void remove_fd_box (struct fd_callback_box *box)
  {
    struct Backend_struct *me = box->backend;
    int fd = box->fd;
    check_box (box, INT_MAX);

    /* FIXME: Shrink arrays? */

    if (fd >= 0) {
      low_set_backend_for_fd (fd, NULL);
      me->fd_boxes[fd - me->fd_boxes_start] = NULL;
    }
    else {
      fd = ~fd;
      /* Link the slot back onto the free list (see add_fd_box). */
      me->inactive_boxes[fd] = (struct fd_callback_box *) me->free_inactive_box;
      me->free_inactive_box = me->inactive_boxes + fd;
    }
  }
    
       static void update_fd_set (struct Backend_struct *me, int fd,
    			      int old_events, int new_events, int flags)
       {
         if (fd < 0) return;
    
    #ifdef __NT__
         if (new_events && !(fd_query_properties(fd, fd_CAN_NONBLOCK) & fd_CAN_NONBLOCK)) {
           Pike_fatal("update_fd_set() on non-socket!\n");
         }
    #endif /* __NT__ */
    
         IF_PD(fprintf(stderr, "update_fd_set(%p, %d, 0x%08x, 0x%08x)\n",
    		   me, fd, old_events, new_events));
    
         if (me->update_fd_set_handler) {
           me->update_fd_set_handler(me, me->handler_data,
    				 fd, old_events, new_events, flags);
         } else {
           Pike_fatal("No update_fd_set_handler set[%p, %d, 0x%04x, 0x%04x].\n");
         }
       }
    
      PMOD_EXPORT void hook_fd_callback_box (struct fd_callback_box *box)
      {
        struct Backend_struct *me = box->backend;
        int fd = box->fd;
        IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: hook_fd_callback_box: "
    		  "fd %d, events 0x%x, object %p\n",
    		  THR_NO, me->id, fd, box->events, box->ref_obj));
    #ifdef PIKE_DEBUG
        if (!me) Pike_fatal ("Backend not set.\n");
    #endif
    #ifdef __NT__
        if ((fd >= 0) && box->events &&
    	!(fd_query_properties(fd, fd_CAN_NONBLOCK) & fd_CAN_NONBLOCK)) {
          Pike_fatal("hook_fd_callback_box() on non-socket!\n"
    		 "  fd: %d\n"
    		 "  events: 0x%04x\n"
    		 "  fd_properties: 0x%04x\n",
    		 fd, box->events, fd_query_properties(fd, fd_CAN_NONBLOCK));
        }
    #endif /* __NT__ */
        add_fd_box (box);
        if (fd >= 0) update_fd_set (me, fd, 0, box->events, box->flags);
        if (box->ref_obj && box->events) add_ref (box->ref_obj);
      }
    
  /* Unhook box from its backend: deregister the fd from the fd set,
   * remove it from the tables, clear box->backend and pending events,
   * and release the ref on box->ref_obj that was held while events were
   * wanted.  Safe to call on an already-unhooked box. */
  PMOD_EXPORT void unhook_fd_callback_box (struct fd_callback_box *box)
  {
    /* Accept an unhooked box; can happen when we're called from an
     * object exit hook due to being freed by free_object below. */
    if (!box->backend) {
      IF_PD(fprintf(stderr, "[%d]BACKEND[unhooked box]: unhook_fd_callback_box: "
		    "fd %d, object %p\n", THR_NO, box->fd, box->ref_obj));
      return;
    }

    check_box (box, INT_MAX);
    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: unhook_fd_callback_box: fd %d, object %p\n",
		  THR_NO, box->backend->id, box->fd, box->ref_obj));

    if (box->fd >= 0) update_fd_set (box->backend, box->fd, box->events, 0, box->flags);
    remove_fd_box (box);
    box->backend = NULL;
    /* Make sure no further callbacks are called on this box. */
    box->revents = 0;
    box->rflags = 0;
    if (box->ref_obj && box->events) {
      /* Use gc safe method to allow calls from within the gc. */
      /* box->ref_obj is only converted from a counted to
       * non-counted ref, so it shouldn't be clobbered by the free. */
      union anything u;
      u.object = box->ref_obj;
      gc_free_short_svalue (&u, T_OBJECT);
    }
  }
    
      PMOD_EXPORT void set_fd_callback_events (struct fd_callback_box *box, int events, int flags)
      {
        int old_events = box->events;
        check_box (box, INT_MAX);
        IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: set_fd_callback_events: "
    		  "fd %d, events from 0x%x to 0x%x, object %p\n",
    		  THR_NO, box->backend->id, box->fd, old_events, events,
    		  box->ref_obj));
        if (box->fd >= 0) update_fd_set (box->backend, box->fd, old_events, events, flags);
        box->events = events;
        box->flags = flags;
    
        if (box->ref_obj) {
          if (!old_events) {
    	if (events) add_ref (box->ref_obj);
          }
          else
    	if (!events) {
    	  /* Use gc safe method to allow calls from within the gc. */
    	  /* box->ref_obj is only converted from a counted to
    	   * non-counted ref, so it shouldn't be clobbered by the free. */
    	  union anything u;
    	  u.object = box->ref_obj;
    	  gc_free_short_svalue (&u, T_OBJECT);
    	}
        }
      }
    
  /* Move box to backend new (no-op when it is already there).  The box
   * keeps its fd and event mask; the fd registration is moved from the
   * old backend's fd set to the new one's. */
  PMOD_EXPORT void change_backend_for_box (struct fd_callback_box *box,
					   struct Backend_struct *new)
  {
    struct Backend_struct *old = box->backend;
    if (old) check_box (box, INT_MAX);

#ifdef PIKE_DEBUG
    if (!new) Pike_fatal ("New backend is invalid.\n");
#endif

    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: change_backend_for_box: "
		  "fd %d, new backend %d\n",
		  THR_NO, old?old->id:-1, box->fd, new->id));

    if (old != new) {
      if (old) {
	if (box->fd >= 0) update_fd_set (old, box->fd, box->events, 0, box->flags);
	remove_fd_box (box);
	if (box->next) {
	  /* The box is active in the old backend. Unlink it. */
	  /* NOTE(review): this walk assumes the next pointers form a
	   * circular list — verify against the list's producer. */
	  struct fd_callback_box *pred = box->next;
	  /* Find the predecessor. */
	  while (pred->next != box) {
	    pred = pred->next;
	  }
	  pred->next = box->next;
	  box->next = NULL;
	  if (box->ref_obj) free_object(box->ref_obj);
	}
      }
      box->backend = new;
      add_fd_box (box);
      if (box->fd >= 0) update_fd_set (new, box->fd, 0, box->events, box->flags);
    }
  }
    
      PMOD_EXPORT void change_fd_for_box (struct fd_callback_box *box, int new_fd)
      {
        int old_fd = box->fd;
    
        if (!box->backend) {
          /* Convenience so that the caller doesn't have to check if the
           * box is hooked in. */
          IF_PD(fprintf(stderr, "[%d]BACKEND[unhooked box]: change_fd_for_box: "
    		    "fd from %d to %d, obj: %p\n",
    		    THR_NO, old_fd, new_fd, box->ref_obj));
          box->fd = new_fd;
          box->revents = 0;
          box->rflags = 0;
        }
    
        else {
          check_box (box, INT_MAX);
    
          if (old_fd >= 0 ? old_fd != new_fd : new_fd >= 0) {
    	if (old_fd >= 0) update_fd_set (box->backend, old_fd, box->events, 0, box->flags);
    	remove_fd_box (box);
    	box->fd = new_fd;
    	add_fd_box (box);
    	new_fd = box->fd;
    	box->revents = 0;
        box->rflags = 0;
    	if (new_fd >= 0) update_fd_set (box->backend, new_fd, 0, box->events, box->flags);
          }
    
          IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: change_fd_for_box: "
    		    "fd from %d to %d\n", THR_NO, box->backend->id, old_fd, new_fd));
        }
      }
    
  /* Cleanup helper: drop the ref a box holds on its container object. */
  static void do_free_fd_box(struct fd_callback_box *box)
  {
    if (box->ref_obj) free_object(box->ref_obj);
  }
    
      static void do_free_fd_list(struct fd_callback_box *fd_list)
      {
        struct fd_callback_box *box;
        while ((box = fd_list->next)) {
          fd_list->next = box->next;
          box->next = NULL;
          if (box->ref_obj) free_object(box->ref_obj);
        }
      }
    
    #ifdef PIKE_DEBUG
    
  /* Debug-only consistency pass: verify the call out heap, run the
   * handler-specific debug hook, and check every hooked callback box. */
  static void backend_do_debug(struct Backend_struct *me)
  {
    backend_verify_call_outs(me);

    if (me->debug_handler) {
      me->debug_handler(me, me->handler_data);
    }
      
    {FOR_EACH_ACTIVE_FD_BOX (me, box) check_box (box, INT_MAX);}
    {FOR_EACH_INACTIVE_FD_BOX (me, box) check_box (box, INT_MAX);}
  }
    
    #endif	/* PIKE_DEBUG */
    
      /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time)
       *!   Perform one pass through the backend.
       *!
       *!   Calls any outstanding call-outs and non-blocking I/O
 *!   callbacks that are registered in this backend object.
       *!
       *! @param sleep_time
       *!   Wait at most @[sleep_time] seconds. The default when
       *!   unspecified or the integer @expr{0@} is no time limit.
       *!
       *! @returns
       *!   If the backend did call any callbacks or call outs then the
       *!   time spent in the backend is returned as a float. Otherwise
       *!   the integer @expr{0@} is returned.
       *!
       *! @seealso
       *!   @[Pike.DefaultBackend], @[main()]
       */
  /* Only the prototype is declared here (note the "prototype;"
   * qualifier); the implementation is supplied separately per backend
   * variant. */
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
    prototype;
  {
  }
    
    #ifndef tObjImpl_THREAD_THREAD
      /* Kludge for precompile.pike; it resolves object(Thread.Thread)
       * to tObjImpl_THREAD_THREAD, while "program_id.h" only knows about
       * tObjImpl_THREAD_ID.
       */
    #define tObjImpl_THREAD_THREAD	tObjImpl_THREAD_ID
    #endif /* !tObjImpl_THREAD_THREAD */
    
      /*! @decl Thread.Thread executing_thread()
       *! @decl int executing_thread()
       *!
       *! Return the thread currently executing in the backend. I.e. the
       *! thread that has called @[`()] and hasn't exited from that call.
       *! Zero is returned if there's no thread in the backend.
       *!
       *! If Pike is compiled without thread support then @expr{1@} is
       *! returned if we're inside the backend, @expr{0@} otherwise.
       */
  PIKEFUN object(Thread.Thread)|int(0..1) executing_thread()
    /* FIXME: The type is too weak, but precompile.pike doesn't
     * understand different function variants in cpp branches. */
  {
    pop_n_elems (args);
#ifdef PIKE_THREADS
    /* exec_thread is the thread currently running the backend loop. */
    if (THIS->exec_thread)
      ref_push_object (THIS->exec_thread->thread_obj);
    else
      push_int (0);
#else
    /* Without thread support exec_thread is a plain 0/1 flag. */
    push_int (THIS->exec_thread);
#endif
  }
    
      /*! @decl void add_file(Stdio.File|Stdio.FILE f)
       *!
       *! Register a file to be handled by this backend.
       *! 
       *! @param f
       *!   File to register.
       *!
       *! Registers @[f] to be handled by this backend.
       *! This simply does @expr{f->set_backend(backend)@} where
       *! @expr{backend@} is this object.
       *!
       *! @seealso
       *!   @[Pike.DefaultBackend], @[main()]
       */
  PIKEFUN void add_file(object f)
  {
    /* Equivalent to f->set_backend(this). */
    ref_push_object (Pike_fp->current_object);
    apply (f, "set_backend", 1);
    pop_stack();
  }
    
    
       /*! @decl int id()
        *!
        *! Return an integer that uniquely identifies this backend. For
        *! the default backend that integer is @expr{0@}.
        */
   PIKEFUN int id()
   {
     /* id is assigned at backend creation; 0 is the default backend. */
     RETURN (THIS->id);
   }
    
   /* sprintf() hook: only the 'O' (object) format is handled, yielding
    * "Pike.Backend(<id>)"; any other format returns 0 so sprintf falls
    * back to its default formatting. */
   PIKEFUN string _sprintf(int type, mapping flags)
   {
     if (type == 'O') {
       push_constant_text ("Pike.Backend(%d)");
       push_int (THIS->id);
       f_sprintf (2);
       stack_pop_n_elems_keep_top (args);
     }
     else {
       pop_n_elems (args);
       push_int (0);
     }
   }
    
      extern int pike_make_pipe(int *);
    
  /* GC check pass: report the object refs this backend holds — the
   * call out objects in the heap, and the container objects of all
   * callback boxes with a nonzero event mask. */
  GC_CHECK
  {
    struct Backend_struct *me =
      (struct Backend_struct *) Pike_fp->current_storage;
    int e;

    for (e = 0; e < me->num_pending_calls; e++) {
      if (CALL(e)->this)
	debug_gc_check (CALL(e)->this,
			" as call out in backend object");
    }

    {FOR_EACH_ACTIVE_FD_BOX (me, box) {
	check_box (box, INT_MAX);
	if (box->ref_obj && box->events)
	  debug_gc_check (box->ref_obj, " as container object "
			  "for an active callback in backend object");
      }}
    {FOR_EACH_INACTIVE_FD_BOX (me, box) {
	check_box (box, INT_MAX);
	if (box->ref_obj && box->events)
	  debug_gc_check (box->ref_obj, " as container object "
			  "for an inactive callback in backend object");
      }}
  }
    
  /* GC recurse pass: visit the same refs as GC_CHECK above — call out
   * objects and container objects of event-active callback boxes. */
  GC_RECURSE
  {
    struct Backend_struct *me =
      (struct Backend_struct *) Pike_fp->current_storage;
    int e;

    for (e = 0; e < me->num_pending_calls; e++) {
      if (CALL(e)->this)
	gc_recurse_short_svalue ((union anything *) &CALL(e)->this, T_OBJECT);
    }

    {FOR_EACH_ACTIVE_FD_BOX (me, box) {
	if (box->ref_obj && box->events)
	  gc_recurse_short_svalue ((union anything *) &box->ref_obj, T_OBJECT);
      }}
    {FOR_EACH_INACTIVE_FD_BOX (me, box) {
	if (box->ref_obj && box->events)
	  gc_recurse_short_svalue ((union anything *) &box->ref_obj, T_OBJECT);
      }}
  }
    
  /* Leave-the-backend cleanup: mark that no thread is executing in
   * this backend any more (counterpart of low_backend_once_setup). */
  static void low_backend_cleanup (struct Backend_struct *me)
  {
    me->exec_thread = 0;
  }
    
  /* Prepare one round of the backend and compute the timeout for the
   * following poll/select call.
   *
   * On entry *start_time holds the relative maximum time to wait
   * (tv_sec < 0 means wait "forever"); on exit it holds the current
   * time. The resulting timeout is left in me->next_timeout as a
   * relative value: clamped to zero if the deadline already passed,
   * or set to 100000000 s to represent "forever".
   *
   * Also claims the backend for the calling thread (me->exec_thread)
   * and runs the registered backend callbacks, which may shorten —
   * but must never lengthen — the timeout. */
  static void low_backend_once_setup(struct Backend_struct *me,
				     struct timeval *start_time)
  {
#ifdef PIKE_DEBUG
    struct timeval max_timeout;
#endif
    struct timeval *next_timeout = &me->next_timeout, now;

    alloca(0);			/* Do garbage collect */
#ifdef PIKE_DEBUG
    if(d_flag > 1) do_debug();
#endif

    /* Claim the backend. Concurrent use from another thread is always
     * an error; reentry from the same thread is an error except for
     * the CFRunLoop backend, where setup may legitimately run more
     * than once. */
    if(me->exec_thread) {
#ifdef PIKE_THREADS
      if (me->exec_thread != Pike_interpreter.thread_state)
	Pike_error ("Backend already in use by another thread.\n");
      else
#endif
	/* It's actually not a problem to make this function
	 * reentrant, but that'd introduce a risk of races in the
	 * callbacks (i.e. between when a read callback is called
	 * and when it reads the data), and besides I can't think
	 * of any sane way to use it. Also, this error can help
	 * discover otherwise tricky bugs. /mast */
#ifndef BACKEND_USES_CFRUNLOOP
	Pike_error ("Backend already running - cannot reenter.\n");
#else
        ; /* Do nothing, as we can call setup more than once. */
#endif
    }
#ifdef PIKE_THREADS
    me->exec_thread = Pike_interpreter.thread_state;
#else
    me->exec_thread = 1;
#endif

#ifndef OWN_GETHRTIME
    ACCURATE_GETTIMEOFDAY(&now);
#else
    /* good place to run the gethrtime-conversion update
       since we have to run gettimeofday anyway /Mirar */
    INACCURATE_GETTIMEOFDAY(&now);
    own_gethrtime_update(&now);
#endif
    /* Convert the relative *start_time into an absolute deadline in
     * *next_timeout; tv_sec < 0 means "no deadline". */
    if (start_time->tv_sec < 0) {
      next_timeout->tv_sec = -1;
      next_timeout->tv_usec = 0;
    }
    else {
      *next_timeout = *start_time;
      my_add_timeval(next_timeout, &now);
    }

    *start_time = now;

    /* Call outs: the first pending call out (CALL(0)) may impose an
     * earlier deadline than the caller's. */
    if(me->num_pending_calls)
      if(next_timeout->tv_sec < 0 ||
	 my_timercmp(& CALL(0)->tv, < , next_timeout))
	*next_timeout = CALL(0)->tv;

#ifdef PIKE_DEBUG
    max_timeout = *next_timeout;
#endif
    /* Backend callbacks may lower the timeout (e.g. to request an
     * immediate round) but never raise it — verified below. */
    call_callback(& me->backend_callbacks, me);
#ifdef PIKE_DEBUG
    if (max_timeout.tv_sec >= 0 &&
	(next_timeout->tv_sec < 0 ||
	 my_timercmp (&max_timeout, <, next_timeout)))
      Pike_fatal ("Timeout raised from %lu.%lu to %lu.%lu by a backend callback.\n",
		  (unsigned long)max_timeout.tv_sec,
		  (unsigned long)max_timeout.tv_usec,
		  (unsigned long)next_timeout->tv_sec,
		  (unsigned long)next_timeout->tv_usec);
#endif

    /* Turn the absolute deadline back into a relative timeout. */
    if (next_timeout->tv_sec < 0) {
      /* Wait "forever". */
      next_timeout->tv_sec = 100000000;
      next_timeout->tv_usec = 0;
    }
    else if(my_timercmp(next_timeout, > , &now))
    {
      my_subtract_timeval(next_timeout, &now);
    }else{
      next_timeout->tv_usec = 0;
      next_timeout->tv_sec = 0;
    }
  }
    
    
  /* Call callbacks for the active events.
   *
   * fd_list is a linked list of fd callback boxes with pending events
   * (box->revents). Each box is unhooked from the list and its
   * callback is invoked once for every event type that both fired and
   * is still wanted.
   *
   * NOTE: The first element in the fd_list is a sentinel!
   *
   * returns 1 on early exit (a callback returned -1, asking for the
   * round to be aborted), 0 when the whole list was processed.
   */
  static int backend_call_active_callbacks(struct fd_callback_box *fd_list,
                                           struct Backend_struct *PDUNUSED(me))
  {
    struct fd_callback_box *box;
    while((box = fd_list->next))
    {
      int fd = box->fd;
      ONERROR uwp;

      /* Unhook the box. The ONERROR handler makes sure the box is
       * released even if a callback throws. */
      fd_list->next = box->next;
      box->next = NULL;
      SET_ONERROR(uwp, do_free_fd_box, box);

      IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: Examining box for fd %d"
		    " revents:0x%04x\n",
		    THR_NO, me->id, fd, box->revents));

      if (box->fd < 0) {
	/* The box is no longer active.
	 * Or we have found our sentinel fd_list.
	 *
	 * Note that the loop will terminate, since we
	 * have broken the cycle above when we set
	 * box->next to NULL.
	 */
	CALL_AND_UNSET_ONERROR(uwp);
	continue;
      }

      /* From the roxen-chat re: connecttest.pike/FreeBSD:
       *
       * kqueue is returning the correct info:
       * {7,EVFILT_READ,EV_ADD|EV_EOF,61,0x0,0x0}.
       * errno 61 is ECONNREFUSED
       */
      /* For each event type below, the callback runs only if the
       * event both fired (revents) and is still wanted (events) — an
       * earlier callback in this iteration may have changed
       * box->events. A callback returning -1 aborts the round. */
      if (box->revents & box->events & PIKE_BIT_FD_READ_OOB) {
	IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: read_oob_callback(%d, %p)\n",
		      THR_NO, me->id, fd, box->ref_obj));
	errno = 0;
	if (box->callback (box, PIKE_FD_READ_OOB) == -1) {
	  CALL_AND_UNSET_ONERROR(uwp);
	  goto backend_round_done;
	}
      }

      if (box->revents & box->events & PIKE_BIT_FD_READ) {
	/* FIXME: Consider utilizing ACTIVE_POLLSET[i].data in
	 *        the kqueue case.
	 */
	IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: read_callback(%d, %p)\n",
		      THR_NO, me->id, fd, box->ref_obj));
	errno = 0;
	if (box->callback (box, PIKE_FD_READ) == -1) {
	  CALL_AND_UNSET_ONERROR(uwp);
	  goto backend_round_done;
	}
      }

      if (box->revents & box->events & PIKE_BIT_FD_WRITE_OOB) {
	IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: write_oob_callback(%d, %p)\n",
		      THR_NO, me->id, fd, box->ref_obj));
	errno = 0;
	if (box->callback (box, PIKE_FD_WRITE_OOB) == -1) {
	  CALL_AND_UNSET_ONERROR(uwp);
	  goto backend_round_done;
	}
      }

      if (box->revents & box->events & PIKE_BIT_FD_WRITE) {
	/* FIXME: Consider utilizing ACTIVE_POLLSET[i].data in
	 *        the kqueue case.
	 */
	IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: write_callback(%d, %p)\n",
		      THR_NO, me->id, fd, box->ref_obj));
	errno = 0;
	if (box->callback (box, PIKE_FD_WRITE) == -1) {
	  CALL_AND_UNSET_ONERROR(uwp);
	  goto backend_round_done;
	}
      }


      if (box->revents & box->events & PIKE_BIT_FD_FS_EVENT) {
	/* FIXME: Consider utilizing ACTIVE_POLLSET[i].data in
	 *        the kqueue case.
	 */
	IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: fs_event_callback(%d, %p)\n",
		      THR_NO, me->id, fd, box->ref_obj));
	errno = 0;
	if (box->callback (box, PIKE_FD_FS_EVENT) == -1) {
	  CALL_AND_UNSET_ONERROR(uwp);
	  goto backend_round_done;
	}
      }

      if (box->revents & PIKE_BIT_FD_ERROR) {
	/* Error. Fetch the pending socket error (for non-sockets,
	 * e.g. the write end of a pipe whose read end was closed,
	 * EPIPE is substituted), then disable further callbacks for
	 * the fd. */
	int old_events;
	int err;
	ACCEPT_SIZE_T len = sizeof (err);
	errno = 0;
	/* FIXME: This could be too late - the error might be
	 * clobbered by the callbacks we might have called
	 * above. */
	if (!getsockopt (fd, SOL_SOCKET, SO_ERROR, (void *) &err, &len)) {
	  IF_PD (fprintf (stderr,
			  "[%d]BACKEND[%d]: POLLERR on %d, error=%d\n",
			  THR_NO, me->id, fd, err));
	  errno = err;
	}
	else {
	  /* Note: This happens for FIFOs and PIPEs on Linux on the write-end
	   *       if the read-end has been closed.
	   */
#ifdef PIKE_DEBUG
#ifdef ENOTSOCK
	  if (errno != ENOTSOCK) {
#endif
	    fprintf(stderr,
		    "Got POLLERR on non-socket fd %d (getsockopt errno=%d)\n",
		    fd, errno);
#ifdef ENOTSOCK
	  } else {
	    IF_PD(fprintf(stderr, "Got POLLERR on non-socket fd %d\n", fd));
	  }
#endif
#endif /* PIKE_DEBUG */
	  errno = err = EPIPE;
	}

	box->revents = 0;
    box->rflags = 0;

	/* We don't want to keep this fd anymore.
	 * Note: This disables any further callbacks.
	 */
	old_events = box->events;
	set_fd_callback_events (box, box->events & PIKE_BIT_FD_ERROR, box->flags);
	if (WANT_EVENT (box, ERROR)) {
	  IF_PD(fprintf(stderr,
			"[%d]BACKEND[%d]: error event on fd %d sent to %p\n",
			THR_NO, me->id, fd, box->ref_obj));
	  if (box->callback (box, PIKE_FD_ERROR) == -1) {
	    CALL_AND_UNSET_ONERROR(uwp);
	    goto backend_round_done;
	  }
	}
	/* The following is temporary compat stuff. */
	/* kqueue TODO: shouldn't need to do anything here for fs events, but should verify that. */
	else if (old_events & PIKE_BIT_FD_READ) {
	  IF_PD(fprintf(stderr,
			"[%d]BACKEND[%d]: read_callback(%d, %p) for error %d\n",
			THR_NO, me->id, fd, box->ref_obj, err));
	  if (box->callback (box, PIKE_FD_READ) == -1) {
	    CALL_AND_UNSET_ONERROR(uwp);
	    goto backend_round_done;
	  }
	} else if (old_events & PIKE_BIT_FD_WRITE) {
	  IF_PD(fprintf(stderr,
			"[%d]BACKEND[%d]: write_callback(%d, %p) for error %d\n",
			THR_NO, me->id, fd, box->ref_obj, err));
	  if (box->callback (box, PIKE_FD_WRITE) == -1) {
	    CALL_AND_UNSET_ONERROR(uwp);
	    goto backend_round_done;
	  }
	}
      }

      CALL_AND_UNSET_ONERROR(uwp);
    }
    return 0;

  backend_round_done:
    return 1;
  }
    
  /* cmod object constructor: put the backend in a consistent empty
   * state. The wakeup pipe itself is created later — see the note
   * below and create(). */
  INIT
  {
    struct Backend_struct *me = THIS;

    /* Process-local id, used only for debug output. */
    me->id = unused_id++;

    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: init\n", THR_NO, me->id));

#ifdef _REENTRANT
    THIS->set_busy = 0;
    co_init(&THIS->set_change);
#endif /* _REENTRANT */
    me->exec_thread = 0;

    me->backend_callbacks.callbacks=0;
    me->backend_callbacks.num_calls=0;

    INVALIDATE_CURRENT_TIME(); /* Why? /mast */

    /* Call out bookkeeping: heap of pending calls plus a hash for
     * lookup, all allocated lazily. */
    me->num_pending_calls=0;
    me->call_heap = 0;
    me->call_heap_size = 0;
    me->hash_size=0;
    me->hash_order=5;
    me->call_hash=0;

    me->backend_obj = Pike_fp->current_object; /* Note: Not refcounted. */

#ifdef PIKE_DEBUG
    me->inside_call_out=0;
#endif

    /* fd callback boxes, allocated lazily. */
    me->fd_boxes=0;
    me->fd_boxes_start = me->fd_boxes_size = 0;
    me->inactive_boxes = me->free_inactive_box = NULL;
    me->inactive_boxes_size = 0;

#ifdef PIKE_DEBUG
    me->debug_handler = NULL;
#endif
    me->update_fd_set_handler = NULL;
    me->handler_data = me;

    /* Note that we can't hook the wakeup pipe
     * until we are fully initialized.
     * The actual hooking of the wakeup pipe
     * is therefore done in create() below.
     */
    me->wakeup_pipe_send_fd = -1;
    INIT_FD_CALLBACK_BOX(&me->wakeup_cb_box, me, NULL, -1,
			 PIKE_BIT_FD_READ, wakeup_callback, 0);

    me->may_need_wakeup = 0;

#ifdef DO_PIKE_CLEANUP
    num_active_backends++;
#endif
  }
      
  /* cmod object destructor: close the wakeup pipe, release all fd
   * callback boxes and their object references, purge this backend
   * from the global fd map, and free all pending call outs. */
  EXIT
    gc_trivial;
  {
    struct Backend_struct *me=THIS;
    int e;

    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: exit\n", THR_NO, me->id));

    free_callback_list(& THIS->backend_callbacks);

    /* Both ends of the wakeup pipe (receive end lives in
     * wakeup_cb_box.fd). */
    if (THIS->wakeup_cb_box.fd >= 0)
      fd_close(THIS->wakeup_cb_box.fd);
    if (me->wakeup_pipe_send_fd >= 0)
      fd_close(THIS->wakeup_pipe_send_fd);

    if (me->fd_boxes) {
      FOR_EACH_ACTIVE_FD_BOX (me, box) {
	check_box (box, INT_MAX);

#ifdef PIKE_DEBUG
	if (get_backend_for_fd (box->fd) != me)
	  Pike_fatal ("Inconsistency in global fd map for fd %d: "
		      "backend is %p, expected %p.\n",
		      box->fd, get_backend_for_fd (box->fd), me);
#endif

	if (box->callback == compat_box_dispatcher) {
#ifdef PIKE_DEBUG
	  fprintf (stderr, "[%d]BACKEND[%d]: "
		   "Compat callbacks left at exit for fd %d: 0x%x\n",
		   THR_NO, me->id, box->fd, box->events);
#endif
	  really_free_compat_cb_box ((struct compat_cb_box *) box);
	}

	/* A box holds a reference to its container object only while
	 * events are wanted — mirrors the GC_RECURSE condition. */
	if (box->backend) {
	  box->backend = NULL;
	  if (box->ref_obj && box->events)
	    free_object (box->ref_obj);
	}
      }

      free(me->fd_boxes);
      me->fd_boxes = NULL;
      me->fd_boxes_start = me->fd_boxes_size = 0;
    }

    if (me->inactive_boxes) {
      FOR_EACH_INACTIVE_FD_BOX (me, box) {
	check_box (box, INT_MAX);
#ifdef PIKE_DEBUG
	if (box->callback == compat_box_dispatcher)
	  Pike_fatal ("Got inactive callback in compat interface.\n");
#endif

	if (box->backend) {
	  box->backend = NULL;
	  if (box->ref_obj && box->events)
	    free_object (box->ref_obj);
	}
      }

      free(me->inactive_boxes);
      me->inactive_boxes = me->free_inactive_box = NULL;
      me->inactive_boxes_size = 0;
    }

    /* Make sure we aren't referenced any more. */
    /* FIXME: Ought to keep better track of our fds so that we don't
     * need to do this loop. /mast */
    for (e = 0; e < fd_map_size; e++) {
      if (fd_map[e] == me) fd_map[e] = NULL;
    }

    /* CALL OUT: drop the references held by pending call outs and
     * release the heap/hash storage. */
    backend_verify_call_outs(me);
    for(e=0;e<me->num_pending_calls;e++)
    {
      CALL(e)->pos = -1;
      if (CALL(e)->this)
	free_object(CALL(e)->this);
    }
    me->num_pending_calls=0;
    if(me->call_heap) free((char*)me->call_heap);
    me->call_heap = NULL;
    if(me->call_hash) free((char*)me->call_hash);
    me->call_hash=NULL;

#ifdef DO_PIKE_CLEANUP
    if (!--num_active_backends) backend_cleanup();
#endif
  }
    
  /*! @decl void create()
   *!
   *! Finish initialization of the backend: creates and hooks the
   *! wakeup pipe. Only clonable from subclasses that install an
   *! @tt{update_fd_set_handler@}; cloning the base class throws.
   */
  PIKEFUN void create()
    flags ID_PROTECTED;
  {
    struct Backend_struct *me = THIS;

    /* The base class has no fd set implementation; subclasses set
     * update_fd_set_handler before chaining to this create(). */
    if (!me->update_fd_set_handler) {
      Pike_error("Attempt to clone the base Backend class.\n");
    }

    /* Create the wakeup pipe once (create() may be called again). */
    if (me->wakeup_pipe_send_fd < 0) {
      int pipe[2];
      IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: Creating wakeup pipe...\n",
		    THR_NO, me->id));
      if(pike_make_pipe(pipe) < 0)
	Pike_error("Couldn't create backend wakeup pipe! errno=%d.\n",errno);

      set_nonblocking(pipe[0],1);
      set_nonblocking(pipe[1],1);
      IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: Initializing wakeup pipe...\n",
		    THR_NO, me->id));
      /* Receive end is watched by the backend itself via
       * wakeup_cb_box; send end is written to by other threads to
       * wake the backend. */
      change_fd_for_box (&me->wakeup_cb_box, pipe[0]);
      me->wakeup_pipe_send_fd = pipe[1];

      IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: Wakeup pipe is [%d, %d]\n",
		    THR_NO, me->id,
		    me->wakeup_pipe_send_fd, THIS->wakeup_cb_box.fd));

      /* Don't keep these on exec! */
      set_close_on_exec(pipe[0], 1);
      set_close_on_exec(pipe[1], 1);
    }
  }
    }
    
    /*! @endclass
     */
    
    /*
     * POLL/SELECT selection
     */
    
    #ifndef HAVE_AND_USE_POLL
    /* Various BSDs have simulated poll(2) APIs. */
    #undef HAVE_POLL
    #endif
    
    /* #undef BACKEND_USES_DEVPOLL */
    /* #undef BACKEND_USES_DEVEPOLL */
    /* #undef BACKEND_USES_POLL_DEVICE */
    
    #ifdef HAVE_POLL
    
    /*
     * Backends using poll(2) or similar.
     */
    
    /* Some constants... */
    
    /* Notes on POLLRDNORM and POLLIN:
     *
     * According to the AIX manual, POLLIN and POLLRDNORM are both set
     * if there's a nonpriority message on the read queue. POLLIN is
     * also set if the message is of 0 length.
     */
    
    #ifndef POLLRDNORM
    #define POLLRDNORM	POLLIN
    #endif /* !POLLRDNORM */
    
    #ifndef POLLRDBAND
    #define POLLRDBAND	POLLPRI
    #endif /* !POLLRDBAND */
    
    #ifndef POLLWRNORM
    #define POLLWRNORM	POLLOUT
#endif /* !POLLWRNORM */
    
    #ifndef POLLWRBAND
    #define POLLWRBAND	POLLOUT
    #endif /* !POLLWRBAND */
    
    #define MY_POLLIN POLLRDNORM|POLLIN
    #define MY_POLLOUT POLLWRNORM|POLLOUT
    
    #define MY_POLLEXCEPT	POLLRDBAND|POLLRDNORM|POLLIN
    #define MY_POLLRDBAND	POLLRDBAND|POLLPRI
    #define MY_POLLWREXCEPT	POLLWRBAND|POLLWRNORM|POLLOUT
    #define MY_POLLWRBAND	POLLWRBAND|MY_POLLOUT
    #define MY_POLLNVAL	POLLNVAL
    
    #if (POLLRDBAND != POLLRDNORM) && (POLLRDBAND != POLLIN)
    #define RDBAND_IS_SPECIAL
    #endif
    
    #if (POLLWRBAND != POLLOUT) && (POLLWRBAND != POLLWRNORM)
    #define WRBAND_IS_SPECIAL
    #endif
    
    #define TIMEOUT_IS_MILLISECONDS
    
    #ifdef BACKEND_USES_DEVPOLL
    /*
     * Backend using /dev/poll-style poll device.
     *
     * Used on:
     *   Solaris 7 + patches and above.
     *   OSF/1 + patches and above.
     *   IRIX 5.6.15m and above.
     */
    
    #define POLL_EVENT	struct pollfd
    #define OPEN_POLL_DEVICE(X)	open(PIKE_POLL_DEVICE, O_RDWR)
    #define CHILD_NEEDS_TO_REOPEN
    
    #define DECLARE_POLL_EXTRAS		\
      POLL_EVENT poll_fds[POLL_SET_SIZE];	\
      struct dvpoll poll_request = {	\
        poll_fds,				\
        POLL_SET_SIZE,			\
        0,					\
      }
    
    #define PDB_POLL(PFD, TIMEOUT)					\
      ((poll_request.dp_timeout = (TIMEOUT)),			\
       (ioctl(PFD, DP_POLL, &poll_request, sizeof(poll_request))))
    
    int POLL_DEVICE_SET_EVENTS(struct Backend_struct *PDUNUSED(me),
    			   int pfd, int fd, INT32 events)
    {
      struct pollfd poll_state[2];
      int e;
      int sz = sizeof(poll_state);
    
      IF_PD(fprintf(stderr, "POLL_DEVICE_SET_EVENTS(%p, %d, %d, 0x%08x)\n",
    		me, pfd, fd, events));
    
      /* NOTE: POLLREMOVE must (unfortunately) be a separate request. */
      poll_state[0].fd = fd;
      poll_state[0].events = POLLREMOVE;
      poll_state[0].revents = 0;
      poll_state[1].fd = fd;
      poll_state[1].events = events;
      poll_state[1].revents = 0;
    
      if (!events) {
        /* We're not interrested in the fd anymore. */
        sz = sizeof(poll_state[0]);
      }
    
    #ifdef _REENTRANT
      /* FIXME: Ought to check if we're the backend.
       */
      if(num_threads>1)
      {
        /* Release the poll set from the backend. */
        IF_PD(fprintf(stderr, "POLL_DEVICE_SET_EVENTS[%p] grabbing the poll set\n",
    		  me));
        while (me->set_busy) {
          co_wait_interpreter(&me->set_change);
        }
        me->set_busy = 1;
        IF_PD(fprintf(stderr, "POLL_DEVICE_SET_EVENTS[%p] wake up backend\n",
    		  me));
        backend_wake_up_backend(me);
        /* The backend is now waiting in wakeup_callback(). */
      }
    #endif /* _REENTRANT */
    
      IF_PD(fprintf(stderr, "POLL_DEVICE_SET_EVENTS[%p] updating the poll set\n",
    		me));
      while (((e = write(pfd, poll_state, sz)) < 0) && (errno == EINTR))
        ;
    
    #ifdef _REENTRANT
      me->set_busy = 0;
      if(num_threads>1)
      {
        /* Release the backend from wakeup_callback(). */
        IF_PD(fprintf(stderr, "POLL_DEVICE_SET_EVENTS[%p] releasing the backend\n",
    		  me));
        co_broadcast(&me->set_change);
      }
    #endif /* _REENTRANT */
    
      if (e < 0) {
        Pike_fatal("Failed to set state for fd %d in " PIKE_POLL_DEVICE
    	       " (errno:%d).\n",
    	       fd, errno);
      }
    
      /* FIXME: Probably ought to support partial writes. */
      if (e != sz) {
        Pike_fatal("Failed to set state for fd %d in " PIKE_POLL_DEVICE
    	       " short write (%d != %d).\n",
    	       fd, e, (int)sizeof(poll_state));
      }
      IF_PD(fprintf(stderr, "POLL_DEVICE_SET_EVENTS[%p] ==> %d\n",
    		me, e));
      return e;
    }
    	
    #elif defined(BACKEND_USES_DEVEPOLL)
    /*
     * Backend using /dev/epoll-style poll device.
     *
     * Used on:
     *   Linux 2.6 and above.
     * Note:
     *   Some libc's are missing wrappers for the system calls, so
     *   we include the appropriate wrappers below.
     */
    
    #ifndef PIKE_POLL_DEVICE
    #define PIKE_POLL_DEVICE	"epoll"
    #endif
    
    /* The following three are defined by <gnu/stubs.h> which is included
     * from <features.h> which is included from just about everywhere, so
     * it is safe to assume that they have been defined if appropriate.
     */
    #if defined(__stub_epoll_create) || defined(__stub_epoll_ctl) || \
        defined(__stub_epoll_wait)
    /* We have a libc without the wrappers for epoll support.
     */
    #ifndef __NR_epoll_create
    /* Our libc doesn't even know the syscall numbers for the epoll syscalls.
     */
    #ifdef __i386__
    #define __NR_epoll_create 254
    #define __NR_epoll_ctl 255
    #define __NR_epoll_wait 256
    #elif defined(__ia64__)
    #define __NR_epoll_create 1243
    #define __NR_epoll_ctl 1244
    #define __NR_epoll_wait 1245
    #elif defined(__x86_64__)
    #define __NR_epoll_create 214
    #define __NR_epoll_ctl 233
    #define __NR_epoll_wait 232
    #else /* cpu types */
    #error Syscall numbers for epoll_create et al not known on this architecture.
    #endif /* cpu types */
    #endif /* !defined(__NR_epoll_create) */
    #if defined(_syscall1) && defined(_syscall4)
    _syscall1(int, epoll_create, int, size);                               
    _syscall4(int, epoll_ctl, int, epfd, int, op, int, fd,
    	  struct epoll_event *, event);
    _syscall4(int, epoll_wait, int, epfd, struct epoll_event *, pevents,
    	  int, maxevents, int, timeout);
    #undef __stub_epoll_create
    #undef __stub_epoll_ctl
    #undef __stub_epoll_wait
    #else /* !_syscall1 || !_syscall4 */
    #error Missing macros for generation of syscall wrappers.
    #endif /* _syscall1 && _syscall4 */
    #endif /* __stub_epoll_{create, ctl, wait} */
    
    #define POLL_EVENT	struct epoll_event
    #define PDB_GET_FD(EVENT)	EVENT.data.fd
    #define PDB_GET_EVENTS(EVENT)	EVENT.events
    
    /* FIXME: Might want another value instead on POLL_SET_SIZE. */
    #define OPEN_POLL_DEVICE(X)	epoll_create(POLL_SET_SIZE)
    
    #define DECLARE_POLL_EXTRAS		\
      POLL_EVENT poll_fds[POLL_SET_SIZE]
    
    #define PDB_POLL(PFD, TIMEOUT)				\
      epoll_wait(PFD, poll_fds, POLL_SET_SIZE, TIMEOUT)
    
/* Update the epoll event set for fd in the epoll instance pfd.
 *
 * events != 0: modify fd's registration (EPOLL_CTL_MOD), falling back
 * to adding it (EPOLL_CTL_ADD) when epoll reports ENOENT — i.e. the
 * fd wasn't registered yet. events == 0: remove the registration
 * (EPOLL_CTL_DEL); ENOENT then just means "already gone" and is
 * treated as success. EINTR is retried in all cases.
 *
 * Returns the result of the final epoll_ctl() call (0 on success,
 * -1 with errno set on failure).
 */
int POLL_DEVICE_SET_EVENTS(struct Backend_struct *UNUSED(me),
			   int pfd, int fd, INT32 events)
{
  int e;

  if (events) {
    struct epoll_event ev;
#ifdef __CHECKER__
    MEMSET(&ev, 0, sizeof(ev));
#endif
    ev.events = events;
    ev.data.fd = fd;

    /* To avoid valgrind complaints when fd doesn't fill up the
     * ev.data union. */
    PIKE_MEM_RW (ev.data);

    /* The /dev/epoll interface exposes kernel implementation details...
     */
    IF_PD(fprintf(stderr, "epoll_ctl(%d, EPOLL_CTL_MOD, %d, { 0x%08x, %d })\n",
		  pfd, fd, events, fd));
    while (((e = epoll_ctl(pfd, EPOLL_CTL_MOD, fd, &ev)) < 0)  &&
	   (errno == EINTR))
      ;
    if ((e < 0) && (errno == ENOENT)) {
      /* fd not registered yet — add it instead. */
      IF_PD(fprintf(stderr,
		    "epoll_ctl(%d, EPOLL_CTL_ADD, %d, { 0x%08x, %d })\n",
		    pfd, fd, events, fd));
      while (((e = epoll_ctl(pfd, EPOLL_CTL_ADD, fd, &ev)) < 0)  &&
	     (errno == EINTR))
	;
    }
  } else {
    struct epoll_event dummy;
    /* The last argument must be a proper struct pointer even
     * though it isn't used...
     */
    PIKE_MEM_RW (dummy);
    IF_PD(fprintf(stderr, "epoll_ctl(%d, EPOLL_CTL_DEL, %d, &dummy)\n",
		  pfd, fd));
    while (((e = epoll_ctl(pfd, EPOLL_CTL_DEL, fd, &dummy)) < 0) &&
	   (errno == EINTR))
      ;
    /* Already unregistered — that's fine. */
    if ((e < 0) && (errno == ENOENT)) return 0;
  }
  IF_PD(if (e < 0) {
    fprintf(stderr, "epoll_ctl() failed with errno: %d\n", errno);
  });

  return e;
}
    
#endif /* BACKEND_USES_DEVPOLL || BACKEND_USES_DEVEPOLL */
    
    #ifdef HAVE_POLL
    
    /*
     * Backend using poll(2).
     *
     * This is used on most older SVR4- or POSIX-style systems.
     */
    
    #define PB_POLL(SET, TIMEOUT)				\
      poll((SET).poll_fds, (SET).num_in_poll, (TIMEOUT))
    
    struct pb_selectors
    {
      struct pollfd *poll_fds;
      int poll_fd_size;
      int num_in_poll;
    };
    
    static void pb_MY_FD_SET(struct pb_selectors *me, int fd, int add)
    {
      int i;
      IF_PD(fprintf(stderr, "BACKEND: MY_FD_SET(%d, 0x%04x)\n", fd, add));
      for(i=0; i<me->num_in_poll; i++)
      {
        if(me->poll_fds[i].fd == fd)
        {
          me->poll_fds[i].events |= add;
          return;
        }
      }
      me->num_in_poll++;
      if (me->num_in_poll > me->poll_fd_size)
      {
        me->poll_fd_size += me->num_in_poll;	/* Usually a doubling */
        if (me->poll_fds) {
          me->poll_fds =
    	realloc(me->poll_fds, sizeof(struct pollfd)*me->poll_fd_size);
        } else {
          me->poll_fds = malloc(sizeof(struct pollfd)*me->poll_fd_size);
        }
        if (!me->poll_fds)
        {
          Pike_fatal("Out of memory in backend::MY_FD_SET()\n"
    	    "Tried to allocate %d pollfds\n", me->poll_fd_size);
        }
      }
      me->poll_fds[me->num_in_poll-1].fd = fd;
      me->poll_fds[me->num_in_poll-1].events = add;
    }
    
    static void pb_MY_FD_CLR(struct pb_selectors *me, int fd, int sub)
    {
      int i;
      IF_PD(fprintf(stderr, "BACKEND: POLL_FD_CLR(%d, 0x%04x)\n", fd, sub));
      if(!me->poll_fds) return;
      for(i=0; i<me->num_in_poll; i++)
      {
        if(me->poll_fds[i].fd == fd)
        {
          me->poll_fds[i].events &= ~sub;
          if(!me->poll_fds[i].events)
          {
    	/* Note that num_in_poll is decreased here.
    	 * This is to avoid a lot of -1's below.
    	 * /grubba
    	 */
    	me->num_in_poll--;
    	if(i != me->num_in_poll)
    	{
    	  me->poll_fds[i] = me->poll_fds[me->num_in_poll];
    	}
    	/* Might want to shrink poll_fds here, but probably not. */
          }
          break;
        }
      }
    }
      
    
    static void pb_copy_selectors(struct pb_selectors *to,
    			      struct pb_selectors *from)
    {
      IF_PD(fprintf(stderr, "BACKEND: copy_poll_set() from->num_in_poll=%d\n",
    		from->num_in_poll));
      
      if (to->poll_fd_size < from->num_in_poll)
      {
        IF_PD(fprintf(stderr, "BACKEND: copy_poll_set() size %d -> %d\n",
    		  to->poll_fd_size,
    		  from->poll_fd_size));
        to->poll_fd_size=from->poll_fd_size;
        if (to->poll_fds) {
          to->poll_fds =
    	realloc(to->poll_fds, sizeof(struct pollfd)*to->poll_fd_size);
        } else {
          to->poll_fds =
    	malloc(sizeof(struct pollfd)*to->poll_fd_size);
        }
        if (!to->poll_fds) {
          Pike_fatal("Out of memory in backend::copy_poll_set()\n"
    	    "Tried to allocate %d pollfds\n", to->poll_fd_size);
        }
      }
      
      MEMCPY(to->poll_fds,
    	 from->poll_fds,
    	 sizeof(struct pollfd)*from->num_in_poll);
      to->num_in_poll=from->num_in_poll;
    }
    
    #endif /* HAVE_POLL */
    
    #define PB_GET_FD(EVENT)	EVENT.fd
    #ifndef PDB_GET_FD
    #define PDB_GET_FD(EVENT)	PB_GET_FD(EVENT)
    #endif
    #define PB_GET_EVENTS(EVENT)	EVENT.revents
    #ifndef PDB_GET_EVENTS
    #define PDB_GET_EVENTS(EVENT)	PB_GET_EVENTS(EVENT)
    #endif
    #ifndef PDB_GET_FLAGS
    #define PDB_GET_FLAGS(EVENT)	0
    #endif
    
    #elif defined(BACKEND_USES_KQUEUE)
    /*
     * Backend using kqueue-style poll device.
     *
     * FIXME: Not fully implemented yet! Out of band data handling is missing.
     *
     * Used on
     *   FreeBSD 4.1 and above.
     *   MacOS X/Darwin 7.x and above.
     *   Various other BSDs.
     */
    
    
    #define POLL_EVENT	struct kevent
    
    #if defined(BACKEND_USES_CFRUNLOOP)
    #define OPEN_POLL_DEVICE(X)	my_kqueue(X)
    #else
    #define OPEN_POLL_DEVICE(X)	kqueue()
    #endif
    
    #define CHILD_NEEDS_TO_REOPEN
    
    #define PIKE_POLL_DEVICE	"kqueue"
    
    #define TIMEOUT_IS_TIMESPEC
    
    #define MY_POLLIN	EVFILT_READ
    #define MY_POLLOUT	EVFILT_WRITE
    
    /* NOTE: The following 4 event types are specific to kqueue(2) */
    #define MY_POLLFSEVENT	EVFILT_VNODE
    #define MY_POLLPROCESS	EVFILT_PROC
    #define MY_POLLSIGNAL	EVFILT_SIGNAL
    #define MY_POLLTIMER	EVFILT_TIMER
    
    #define MY_POLLERR	EV_ERROR
    #if 0
    #define MY_POLLHUP	EV_EOF
    #else /* !0 */
    #define MY_POLLHUP	0
    #endif /* 0 */
    
    /* FIXME: The kqueue API has no documented support for out of band data. */
    #define MY_POLLEXCEPT	EVFILT_READ
    #define MY_POLLRDBAND	EVFILT_READ
    #define MY_POLLWREXCEPT	EVFILT_WRITE
    #define MY_POLLWRBAND	EVFILT_WRITE
    
    #define DECLARE_POLL_EXTRAS		\
      POLL_EVENT poll_fds[POLL_SET_SIZE]
    
    #define PDB_POLL(SET, TIMEOUT)					\
      kevent((SET), NULL, 0, poll_fds, POLL_SET_SIZE, &(TIMEOUT))
    
    #define PDB_GET_FD(EVENT)		EVENT.ident
    #define PDB_GET_EVENTS(EVENT)		EVENT.filter
    #define PDB_GET_FLAGS(EVENT)    EVENT.fflags
    #define PDB_CHECK_EVENT(EVENT, MASK)	(PDB_GET_EVENTS(EVENT) == (MASK))
    
    /* NOTE: Error events are signalled in the flags field. They thus
     *       must be checked for before the ordinary events.
     */
    #define PDB_CHECK_ERROR_EVENT(EVENT, MASK)	(EVENT.flags & (MASK))
    
    int pdb_MY_FD_CLR(int *pfd, int fd, int filter)
    {
      struct kevent ev;
    
      /* Note: Use EV_DISABLE in preference to EV_DELETE, since
       *       odds are that the fd will be reenabled, and the
       *       filter is deleted anyway when the fd is closed.
       */
      EV_SET(&ev, fd, filter, EV_DISABLE, 0, 0, 0);
      
      return kevent(*pfd, &ev, 1, NULL, 0, NULL);
    }
    
    #define pdb_MY_FD_SET(PFD, FD, FILTER) pdb_MY_FD_SET2(PFD, FD, FILTER, 0)
    
    int pdb_MY_FD_SET2(int *pfd, int fd, int filter, int fflags)
    {
      struct kevent ev[2];
    
     /* VNODE filters seem to need ONESHOT mode, else they just repeat endlessly. */ 
     if(filter == EVFILT_VNODE)
       EV_SET(ev, fd, filter, EV_ADD|EV_ENABLE|EV_CLEAR, fflags, 0, 0);
     else
       EV_SET(ev, fd, filter, EV_ADD|EV_ENABLE, fflags, 0, 0);
      
      return kevent(*pfd, ev, 1, NULL, 0, NULL);
    }
    
    #define pdb_MY_FD_CLR_RDBAND(SET, FD)
    #define pdb_MY_FD_CLR_WRBAND(SET, FD)
    
    #endif
    
    /*
     * Backend using select(2)
     *
     * This is used on most older BSD-style systems, and WIN32.
     */
    
    #define MY_READSET	0
    #define MY_WRITESET	1
    #define MY_EXCEPTSET	2
    /* except == incoming OOB data (or error according to POSIX)
     * outgoing OOB data is multiplexed on write
     */
    
    struct sb_selectors
    {
      int max_fd;
      my_fd_set sets[3];
    };
    
    struct sb_active_selectors
    {
      fd_set asets[3];
      int max_fd;
    };
    
    #define SB_SELECT(SET, TIMEOUT)					\
      fd_select((SET).max_fd + 1,					\
    	    (SET).asets + MY_READSET,				\
    	    (SET).asets + MY_WRITESET,				\
    	    (SET).asets + MY_EXCEPTSET,				\
    	    (TIMEOUT).tv_sec >= 100000000 ? NULL : &(TIMEOUT))
    
    void sb_MY_FD_CLR(struct sb_selectors *me, int fd, int setno)
    {
      if(fd > me->max_fd) return;
      my_FD_CLR(fd, me->sets + setno);
      if(fd == me->max_fd)
      {
        while(me->max_fd >=0 &&
    	  !my_FD_ISSET(me->max_fd, me->sets + MY_READSET) &&
    	  !my_FD_ISSET(me->max_fd, me->sets + MY_WRITESET)
    	  && !my_FD_ISSET(me->max_fd, me->sets + MY_EXCEPTSET)
          )
          me->max_fd--;
      }
    }
    
    void sb_MY_FD_SET(struct sb_selectors *me, int fd, int setno)
    {
      my_FD_SET(fd, me->sets + setno);
      if(fd > me->max_fd) me->max_fd=fd;
    }
    
    static void sb_copy_selectors(struct sb_active_selectors *to,
    			      struct sb_selectors *from)
    {
      fd_copy_my_fd_set_to_fd_set(to->asets + MY_READSET,
    			      from->sets + MY_READSET, from->max_fd+1);
      fd_copy_my_fd_set_to_fd_set(to->asets + MY_WRITESET,
    			      from->sets + MY_WRITESET, from->max_fd+1);
      fd_copy_my_fd_set_to_fd_set(to->asets + MY_EXCEPTSET,
    			      from->sets + MY_EXCEPTSET, from->max_fd+1);
      to->max_fd=from->max_fd;
    }
    
    #ifndef POLL_SET_SIZE
    #define POLL_SET_SIZE		32
    #endif /* !POLL_SET_SIZE */
    
    #define PB_CHECK_EVENT(EVENT, MASK)	(PB_GET_EVENTS(EVENT) & (MASK))
    #ifndef PDB_CHECK_EVENT
    #define PDB_CHECK_EVENT(EVENT, MASK)	(PDB_GET_EVENTS(EVENT) & (MASK))
#endif /* !PDB_CHECK_EVENT */
    
    #define PB_CHECK_ERROR_EVENT(EVENT, MASK)	PB_CHECK_EVENT(EVENT, MASK)
    #ifndef PDB_CHECK_ERROR_EVENT
    #define PDB_CHECK_ERROR_EVENT(EVENT, MASK)	PDB_CHECK_EVENT(EVENT, MASK)
#endif /* !PDB_CHECK_ERROR_EVENT */
    
    #ifdef RDBAND_IS_SPECIAL
    #  define pb_MY_FD_CLR_RDBAND(SET, FD) pb_MY_FD_CLR (SET, FD, MY_POLLRDBAND)
    #else
    #  define pb_MY_FD_CLR_RDBAND(SET, FD)
    #endif
    
    #ifdef WRBAND_IS_SPECIAL
    #  define pb_MY_FD_CLR_WRBAND(SET, FD) pb_MY_FD_CLR (SET, FD, MY_POLLWRBAND)
    #else
    #  define pb_MY_FD_CLR_WRBAND(SET, FD)
    #endif
    
    #ifndef MY_POLLERR
    #define MY_POLLERR	POLLERR
    #endif
    
    #ifndef MY_POLLHUP
    #define MY_POLLHUP	POLLHUP
    #endif
    
    #ifndef MY_POLLFSEVENT
    #define MY_POLLFSEVENT	0
    #endif
    
    #ifndef MY_POLLSIGNAL
    #define MY_POLLSIGNAL	0
    #endif
    
    #if defined(BACKEND_USES_POLL_DEVICE) || defined(BACKEND_USES_KQUEUE)
    
    /*! @class PollDeviceBackend
     *! @inherit __Backend
     *!
     *! @[Backend] implemented with @tt{/dev/poll@} (Solaris, OSF/1 and IRIX),
     *! @tt{epoll(2)@} (Linux) or @tt{kqueue(2)@} (MacOS X, FreeBSD, OpenBSD, etc).
     *!
     *! @seealso
     *!   @[Backend]
     */
PIKECLASS PollDeviceBackend
{
  INHERIT Backend;

  /* Helpers to find the above inherit. */
  static ptrdiff_t pdb_offset = 0;
  CVAR struct Backend_struct *backend;

  /* 
   * POLL/SELECT fd sets
   */
  /* The poll-device fd itself (epoll fd, /dev/poll fd or kqueue fd). */
  CVAR int set;
#if defined(BACKEND_USES_CFRUNLOOP)
  /* Nonzero when polling is routed through a CoreFoundation runloop. */
  CVAR int go_cf;
  /* Nonzero when an external runloop (e.g. a Cocoa NSRunLoop) drives
   * this backend instead of `()'. */
  CVAR int external_run;
  CVAR CFFileDescriptorRef fdref;	/* CF wrapper around the kqueue fd. */
  CVAR CFRunLoopSourceRef source;	/* Runloop source created from fdref. */
  CVAR CFRunLoopObserverRef beObserver;	/* Observer for external runloops. */
  CVAR CFRunLoopTimerRef beTimer;
  /* Number of valid entries in poll_fds after a CF-driven poll. */
  CVAR int event_count;
  /* Nonzero while the interpreter lock is released by the observer. */
  CVAR int gil_released;
  CVAR struct thread_state * thread_state; /* used by external runloops. */
#endif /* BACKEND_USES_CFRUNLOOP */
#ifdef DECLARE_POLL_EXTRAS
    /* Declare any extra variables needed by MY_POLL(). */
  CVAR struct kevent* poll_fds;
#endif /* DECLARE_POLL_EXTRAS */

  DECLARE_STORAGE
    
      /*! @decl void set_signal_event_callback(int signum, function cb)
       *!
       *! @note
       *!   This function is a noop except for the @tt{kqueue@} case.
       */
  /* Register interest in signal signum with the kqueue (EVFILT_SIGNAL
   * via MY_POLLSIGNAL). On non-kqueue builds this is a pure no-op.
   * NOTE(review): the cb argument and the kevent() result q are
   * currently not used/checked here -- presumably the callback is
   * dispatched elsewhere; verify against the signal event handling. */
  PIKEFUN void set_signal_event_callback(int signum, function cb)
  {
    int q;	/* kevent() result; currently ignored. */
#ifdef BACKEND_USES_KQUEUE
    struct kevent ev[2];
    EV_SET(ev, signum, MY_POLLSIGNAL, EV_ADD, 0, 0, 0);
    q = kevent(THIS->set, ev, 1, NULL, 0, NULL);
#endif
    pop_n_elems(args);
  }
    
      /*
       * FD set handling
       */
    
      static void pdb_UPDATE_BLACK_BOX(struct PollDeviceBackend_struct *me, int fd,
    				   int wanted_events)
      {
    #ifdef BACKEND_USES_POLL_DEVICE
        INT32 events = 0;
    
        if (wanted_events & PIKE_BIT_FD_READ) {
          events |= MY_POLLIN;
        }
        if (wanted_events & PIKE_BIT_FD_WRITE) {
          events |= MY_POLLOUT;
        }
        if (wanted_events & PIKE_BIT_FD_READ_OOB) {
          events |= MY_POLLRDBAND;
        }
        if (wanted_events & PIKE_BIT_FD_WRITE_OOB) {
          events |= MY_POLLWRBAND;
        }
        if (wanted_events & PIKE_BIT_FD_FS_EVENT) {
          events |= MY_POLLFSEVENT;
        }
    
        IF_PD(fprintf(stderr, "UPDATE_BLACK_BOX(%d, %d) ==> events: 0x%08x\n",
    		  me->set, fd, events));
        POLL_DEVICE_SET_EVENTS(me->backend, me->set, fd, events);
    #elif defined(BACKEND_USES_KQUEUE)
    
        /* Note: Only used by REOPEN_POLL_DEVICE on a freshly opened kqueue. */
        struct kevent ev[3];
        int nev = 0;
        if (wanted_events & PIKE_BIT_FD_READ ||
    	wanted_events & PIKE_BIT_FD_READ_OOB) {
          EV_SET(ev, fd, MY_POLLIN, EV_ADD, 0, 0, 0);
          nev++;
        }
        if (wanted_events & PIKE_BIT_FD_WRITE ||
    	wanted_events & PIKE_BIT_FD_WRITE_OOB) {
          EV_SET(ev+nev, fd, MY_POLLOUT, EV_ADD, 0, 0, 0);
          nev++;
        }
        if (wanted_events & PIKE_BIT_FD_FS_EVENT) {
    // kqueue TODO generate fflags from the high bits of the wanted_events argument.
          EV_SET(ev+nev, fd, MY_POLLFSEVENT, EV_ADD, 0, 0, 0);
          nev++;
        }
        if (nev)
          kevent(me->set, ev, nev, NULL, 0, NULL);
    #endif /* BACKEND_USES_POLL_DEVICE */
      }
    
    
    #if defined(BACKEND_USES_CFRUNLOOP)
      /* we place this declaration here rather than at the top in order to avoid struct-y unpleasantness */
      int init_cf(struct PollDeviceBackend_struct *me, int i);
      int init_external_cfrl(struct PollDeviceBackend_struct *me, int i);
      int low_my_kqueue(struct PollDeviceBackend_struct *me);
    
      int my_kqueue(struct PollDeviceBackend_struct *me)
      {
         int i;
         
         i = low_my_kqueue(me);
         
         return i;
      }
      
      int low_my_kqueue(struct PollDeviceBackend_struct *me)
      {
        int i;
    
        i = kqueue();
        if(me->go_cf)
          return init_cf(me, i);
        else return i;
      }
    
      /* arg i is the kqueue. */
      int init_external_cfrl(struct PollDeviceBackend_struct *me, int i)
      {
        /* we assume that init_cf() has already been called. */
        
        CFRunLoopObserverRef beObserver = NULL;
        
        int myActivities = kCFRunLoopBeforeWaiting | kCFRunLoopAfterWaiting | kCFRunLoopBeforeTimers | 
                                    kCFRunLoopBeforeSources | kCFRunLoopExit;
    
        CFRunLoopObserverContext context = {0, me, NULL, NULL, NULL};
    
        beObserver = CFRunLoopObserverCreate(NULL, myActivities, 1,
                /* repeat */ 1, cfObserverCallback, &context);
    
        if (beObserver)
        {
          CFRetain(beObserver);
          CFRunLoopAddObserver(CFRunLoopGetCurrent(), beObserver,
            kCFRunLoopDefaultMode);
        }
    
        return 0;
      }
    
      /* arg i is the kqueue. */
      int exit_external_cfrl(struct PollDeviceBackend_struct *me)
      {
        /* we assume that init_cf() has already been called. */
        CFRunLoopObserverRef beObserver = me->beObserver;
        CFRunLoopTimerRef beTimer = me->beTimer;    
        if (beObserver)
        {
          CFRunLoopRemoveObserver(CFRunLoopGetCurrent(), beObserver,
            kCFRunLoopDefaultMode);
          CFRelease(beObserver);
        }    
        if(beTimer)
        {
          CFRelease(beTimer);
        }
    
        return 0;
      }
    
  /* arg i is the kqueue. Wrap it in a CFFileDescriptor, enable read
   * callbacks (fires noteEvents() when the kqueue becomes readable)
   * and create a runloop source for it. The source is created here but
   * only added to a runloop by pdb_low_backend_once(). Returns i. */
  int init_cf(struct PollDeviceBackend_struct *me, int i)
  {
    CFFileDescriptorContext context = {0, me, NULL, NULL, NULL};

    me->fdref = CFFileDescriptorCreate(kCFAllocatorDefault, i, true, noteEvents, &context);
    CFRetain(me->fdref);

    me->source = CFFileDescriptorCreateRunLoopSource(kCFAllocatorDefault, me->fdref, 0);
    CFFileDescriptorEnableCallBacks(me->fdref, kCFFileDescriptorReadCallBack);	
    CFRetain(me->source);

    return i;
  }
    
      void exit_cf(struct PollDeviceBackend_struct *me)
      {
        if(me->source)
        {
          CFRunLoopSourceInvalidate(me->source);
          CFRelease(me->source);
        }
    
        if(me->fdref)
          CFRelease(me->fdref);
      }
    
    #endif /* BACKEND_USES_CFRUNLOOP */
    
  /* This is called in the child process to restore
   * poll state after fork() in case of detaching.
   *
   * Kernel poll objects (epoll/kqueue/etc) don't survive fork(), so
   * we close the old fd, open a fresh poll device, dup2() it back to
   * the old fd number when possible (so the stored me->set stays
   * valid), and re-register every active fd box's events. */
  static void pdb_REOPEN_POLL_DEVICE(struct PollDeviceBackend_struct *me)
  {
    int fd;

    /* EINTR-safe close of the stale (inherited) poll device fd. */
    while ((close(me->set) < 0) && (errno == EINTR))
      ;
    while (((fd = OPEN_POLL_DEVICE(me)) < 0) && (errno == EINTR))
      ;
    if (fd < 0) {
      Pike_fatal("Failed to reopen " PIKE_POLL_DEVICE
		 " after fork (errno: %d).\n", errno);
    }
    if (fd != me->set) {
      /* Try to move the new fd to the old fd number. */
      int e;
      while (((e = dup2(fd, me->set)) < 0) && (errno == EINTR))
	;
      if (e < 0) {
	/* We hope we can use the fd at the new location... */
	me->set = fd;
      } else {
	while ((close(fd) < 0) && (errno == EINTR))
	  ;
      }
    }
    set_close_on_exec(me->set, 1);

    /* Restore the poll-state for all the fds. */
    {FOR_EACH_ACTIVE_FD_BOX (me->backend, box) {
	pdb_UPDATE_BLACK_BOX (me, box->fd, box->events);
      }}

  }
    
    #if defined(BACKEND_USES_CFRUNLOOP)
    static void noteEvents(CFFileDescriptorRef fdref, CFOptionFlags UNUSED(callBackTypes), void *info) {
        struct kevent kev;
        struct timespec tv;
        struct PollDeviceBackend_struct * this_backend;
        int fd;
    	
        tv.tv_sec = 0;
        tv.tv_nsec = 0;
        this_backend = (struct PollDeviceBackend_struct *)info;
        fd = CFFileDescriptorGetNativeDescriptor(fdref);
        kevent(fd, NULL, 0, this_backend->poll_fds, POLL_SET_SIZE, &tv);
        this_backend->event_count = POLL_SET_SIZE;
    }
    #endif /* BACKEND_USES_CFRUNLOOP */
    
  /* Global registry of live PollDeviceBackend instances, so the
   * at-fork callback can reopen every poll device in the child. */
  static struct PollDeviceBackend_struct **pdb_backends = NULL;
  static int num_pdb_backends = 0;	/* Slots in use. */
  static int pdb_backends_size = 0;	/* Slots allocated. */
    
      /* Called from the init callback. */
      static void register_pdb_backend(struct PollDeviceBackend_struct *me)
      {
        if (num_pdb_backends == pdb_backends_size) {
          struct PollDeviceBackend_struct **new_backends =
    	realloc(pdb_backends,
    		(pdb_backends_size+1) *
    		sizeof(struct PollDeviceBackend_struct *)*2);
          if (!new_backends) {
    	Pike_error("Out of memory.\n");
          }
          pdb_backends = new_backends;
          pdb_backends_size = (pdb_backends_size+1)*2;
        }
        pdb_backends[num_pdb_backends++] = me;
      }
    
      /* Called from the exit callback. */
      static void unregister_pdb_backend(struct PollDeviceBackend_struct *me)
      {
        int i = num_pdb_backends;
        /* Search backwards since new backends are more likely to be destructed
         * than old backends.
         */
        while (i--) {
          if (pdb_backends[i] == me) {
    	pdb_backends[i] = pdb_backends[--num_pdb_backends];
    	pdb_backends[num_pdb_backends] = NULL;
    	return;	/* A backend is only supposed to be registered once. */
          }
        }
      }
    
      /* Called in the child after fork(). */
      static void reopen_all_pdb_backends(struct callback *UNUSED(cb), void *UNUSED(a), void *UNUSED(b))
      {
        int i;
        for (i=0; i < num_pdb_backends; i++) {
          pdb_REOPEN_POLL_DEVICE(pdb_backends[i]);
        }
      }
    
  /* Transition the registered event set for fd from old_events to
   * new_events (PIKE_BIT_FD_* masks).
   *
   * Poll-device flavour: just hand the complete new mask to
   * pdb_UPDATE_BLACK_BOX().
   *
   * Select-set flavour: maintain fd's membership in the in/out/except
   * sets bit by bit. The exception set must stay enabled while any of
   * read/read_oob/write is wanted, since errors are reported there and
   * their handling may call read, read_oob and write.
   *
   * Finally wakes the backend if any event was added, so a poll that
   * is already in progress picks up the new registration. */
  static void pdb_update_fd_set(struct Backend_struct *me,
				struct PollDeviceBackend_struct *pdb, int fd,
				int old_events, int new_events, 
#ifdef BACKEND_USES_POLL_DEVICE
				int PDUNUSED(flags)
#else
				int flags
#endif
				)
  {
    int changed_events = old_events ^ new_events;

    IF_PD(fprintf (stderr, "[%d]BACKEND[%d]: pdb_update_fd_set(.., %d, %d, %d, %d):\n",
		   THR_NO, me->id, fd, old_events, new_events, flags));
    

    if (changed_events) {

#ifdef BACKEND_USES_POLL_DEVICE

      pdb_UPDATE_BLACK_BOX(pdb, fd, new_events);

#else  /* !BACKEND_USES_POLL_DEVICE */
      if (changed_events & PIKE_BIT_FD_READ) {
	if (new_events & PIKE_BIT_FD_READ) {
	  pdb_MY_FD_SET(&pdb->set, fd, MY_POLLIN);
	  /* Got to enable the exception set to get errors (at least
	   * according to POSIX). */
	  pdb_MY_FD_SET(&pdb->set, fd, MY_POLLEXCEPT);
	}
	else {
	  pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLIN);
	  if (!(new_events & PIKE_BIT_FD_READ_OOB) &&
	      !(new_events & PIKE_BIT_FD_WRITE))
	    /* Exceptions might cause calls to read, read_oob and write. */
	    pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLEXCEPT);
	}
      }

      if (changed_events & PIKE_BIT_FD_READ_OOB) {
	if (new_events & PIKE_BIT_FD_READ_OOB)
	  pdb_MY_FD_SET(&pdb->set, fd, MY_POLLRDBAND);
	else {
	  if (!(new_events & PIKE_BIT_FD_READ)) {
	    if (!(new_events & PIKE_BIT_FD_WRITE))
	      /* Exceptions might cause calls to read, read_oob and write. */
	      pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLEXCEPT);
	  } else {
	    /* No-op unless RDBAND_IS_SPECIAL on this platform. */
	    pdb_MY_FD_CLR_RDBAND(&pdb->set, fd);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_WRITE) {
	if (new_events & PIKE_BIT_FD_WRITE) {
	  pdb_MY_FD_SET(&pdb->set, fd, MY_POLLOUT);
	  /* Got to enable the exception set to get errors (at least
	   * according to POSIX). */
	  pdb_MY_FD_SET(&pdb->set, fd, MY_POLLEXCEPT);
	}
	else {
	  /* Keep MY_POLLOUT while WRITE_OOB is still wanted, since
	   * both map onto the same output set. */
	  if (!(new_events & PIKE_BIT_FD_WRITE_OOB)) {
	    pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLOUT);
	    if (!(new_events & PIKE_BIT_FD_READ) &&
		!(new_events & PIKE_BIT_FD_READ_OOB))
	      /* Exceptions might cause calls to read, read_oob and write. */
	      pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLEXCEPT);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_WRITE_OOB) {
	if (new_events & PIKE_BIT_FD_WRITE_OOB)
	  pdb_MY_FD_SET(&pdb->set, fd, MY_POLLWRBAND);
	else {
	  if (!(new_events & PIKE_BIT_FD_WRITE)) {
	    /* NOTE(review): this uses MY_POLLWREXCEPT while the read
	     * side uses MY_POLLEXCEPT -- presumably a platform alias
	     * for the write-side exception set; verify against the
	     * MY_POLL* definitions. */
	    pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLWREXCEPT);
	  } else {
	    /* No-op unless WRBAND_IS_SPECIAL on this platform. */
	    pdb_MY_FD_CLR_WRBAND(&pdb->set, fd);
	  }
	}
      }

// TODO kqueue ADD fflags
      if (changed_events & PIKE_BIT_FD_FS_EVENT) {
	if (new_events & PIKE_BIT_FD_FS_EVENT)
	  pdb_MY_FD_SET2(&pdb->set, fd, MY_POLLFSEVENT, flags);
	else {
	    pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLFSEVENT);
	}
      }

#endif	/* !BACKEND_USES_POLL_DEVICE */

      if (new_events & ~old_events)
	/* New events were added. */
	backend_wake_up_backend(me);
    }
  }
    
      /* Mapping of events to flags and callbacks.
       *
       * Event	select	poll		kqueue			callback
       *
       * data_in	read	POLLIN		EVFILT_READ		READ
       *			POLLRDNORM	EVFILT_READ[EOF]      
       *
       * data_out	write	POLLOUT		EVFILT_WRITE		WRITE
       *			POLLWRNORM
       *
       * oob_in	except	POLLPRI		(EVFILT_READ)		READ_OOB
       *			POLLRDBAND
       *
       * oob_out	write	POLLWRBAND	(EVFILT_WRITE)		WRITE_OOB
       *
       * close_in	read	POLLIN		EVFILT_READ[EOF]	READ
       *                    POLLHUP(Linux pipe)
       *
       * close_out	write	POLLHUP		EVFILT_WRITE[EOF]	WRITE
       *			POLLERR(Linux pipe)			>WRITE_OOB
       *
       * conn_ok	write	POLLOUT		EVFILT_WRITE		WRITE
       *
       * conn_fail	read	POLLIN		EVFILT_READ[EOF]	READ
       *		except						(READ_OOB)
       *
       * new_conn	read	POLLIN		EVFILT_READ		READ
       *
       * sock_err	except	POLLERR		EVFILT_READ[ERR]	ERROR
       *								>READ
       *
       * sock_err	except	POLLERR		EVFILT_WRITE[ERR]	ERROR
       *								>WRITE
       */
    
    #ifdef POLL_DEBUG
      static void pdb_describe_event(struct Backend_struct *me, POLL_EVENT event)
      {
    #ifdef BACKEND_USES_KQUEUE
        fprintf(stderr, "[%d]BACKEND[%d]: fd:%d filter:%d flags:0x%08x",
    	    THR_NO, me->id, PDB_GET_FD(event), PDB_GET_EVENTS(event),
    	    event.flags);
        if (PDB_CHECK_EVENT(event, MY_POLLIN)) {
          fprintf(stderr, "  EVFILT_READ");
        } else if (PDB_CHECK_EVENT(event, MY_POLLOUT)) {
          fprintf(stderr, "  EVFILT_WRITE");
        } else if (PDB_CHECK_EVENT(event, MY_POLLFSEVENT)) {
          fprintf(stderr, "  POLLFSEVENT");
        } else {
          fprintf(stderr, "  UNKNOWN");
        }
        if (event.flags & EV_ERROR) {
          fprintf(stderr, "[ERROR]");
        }
        if (event.flags & EV_EOF) {
          fprintf(stderr, "[EOF]");
        }
        fprintf(stderr, "(%d)\n", event.data);
    #else /* !BACKEND_USES_KQUEUE */
        fprintf(stderr, "[%d]BACKEND[%d]: fd:%d events:0x%04x",
    	    THR_NO, me->id, PDB_GET_FD(event), PDB_GET_EVENTS(event));
        if (PDB_CHECK_EVENT(event, MY_POLLNVAL)) {
          fprintf(stderr, "  POLLNVAL");
        }
        if (PDB_CHECK_EVENT(event, MY_POLLERR)) {
          fprintf(stderr, "  POLLERR");
        }
        if (PDB_CHECK_EVENT(event, MY_POLLHUP)) {
          fprintf(stderr, "  POLLHUP");
        }
        if (PDB_CHECK_EVENT(event, MY_POLLRDBAND)) {
          fprintf(stderr, "  POLLRDBAND");
        }
        if (PDB_CHECK_EVENT(event, MY_POLLIN)) {
          fprintf(stderr, "  POLLIN");
        }
        if (PDB_CHECK_EVENT(event, MY_POLLWRBAND)) {
          fprintf(stderr, "  POLLWRBAND");
        }
        if (PDB_CHECK_EVENT(event, MY_POLLOUT)) {
          fprintf(stderr, "  POLLOUT");
        }
        fprintf(stderr, "\n");
    #endif /* BACKEND_USES_KQUEUE */
      }
    #else /* !POLL_DEBUG */
    #define pdb_describe_event(BACKEND, EVENT)
    #endif /* POLL_DEBUG */
    
  /* A negative tv_sec in timeout turns it off. If it ran until the
   * timeout without calling any callbacks or call outs (except those
   * on backend_callbacks) then tv_sec will be set to -1. Otherwise it
   * will be set to the time spent.
   *
   * This is the core of one backend pass: convert the next timeout to
   * the platform's poll representation, block in PDB_POLL (or in a
   * CoreFoundation runloop), translate the returned events into
   * PIKE_BIT_FD_* revents on the fd boxes, invoke the callbacks, then
   * run call outs and the backend_callbacks list. */
  static void pdb_low_backend_once(struct PollDeviceBackend_struct *pdb,
				   struct timeval *timeout)
  {
    ONERROR uwp;
    int i, done_something = 0;
    struct timeval start_time = *timeout;
    struct Backend_struct *me = pdb->backend;

#ifdef DECLARE_POLL_EXTRAS
    /* Declare any extra variables needed by MY_POLL(). */
    DECLARE_POLL_EXTRAS;
#endif /* DECLARE_POLL_EXTRAS */

    SET_ONERROR(uwp, low_backend_cleanup, me);
    low_backend_once_setup(pdb->backend, &start_time);

    if (TYPEOF(me->before_callback) != T_INT)
      call_backend_monitor_cb (me, &me->before_callback);

    {
#ifdef BACKEND_USES_CFRUNLOOP
	  double cf_timeout;
#endif /* BACKEND_USES_CFRUNLOOP */

#ifdef TIMEOUT_IS_MILLISECONDS
      int poll_timeout;
#elif defined(TIMEOUT_IS_TIMEVAL)
      struct timeval poll_timeout;
#elif defined(TIMEOUT_IS_TIMESPEC)
      struct timespec poll_timeout;
#else
#error Unknown timeout method.
#endif /* TIMEOUT_IS_* */
      struct timeval *next_timeout = &pdb->backend->next_timeout;

      me->may_need_wakeup = 1;

      /* Convert next_timeout into the representation PDB_POLL wants. */
#ifdef TIMEOUT_IS_MILLISECONDS
      if (next_timeout->tv_sec >= 100000000)
	/* Take this as waiting forever. */
	poll_timeout = -1;
      else if(next_timeout->tv_sec < 0)
	poll_timeout = 0;
      else if(next_timeout->tv_sec > (INT_MAX/1002)) /* about 24 days.*/
	poll_timeout = INT_MAX/1002;
      else
        poll_timeout = MAXIMUM((next_timeout->tv_sec*1000) +
			       next_timeout->tv_usec/1000,2);
#elif defined(TIMEOUT_IS_TIMEVAL)
      poll_timeout = *next_timeout;
#elif defined(TIMEOUT_IS_TIMESPEC)
      poll_timeout.tv_sec = next_timeout->tv_sec;
      poll_timeout.tv_nsec = next_timeout->tv_usec*1000;
#else
#error Unknown timeout method.
#endif /* TIMEOUT_IS_* */

      IF_PD(fprintf (stderr, "[%d]BACKEND[%d]: Doing poll on fds:\n",
		     THR_NO, me->id));

      /* Release the interpreter lock for the duration of the wait. */
      check_threads_etc();
      THREADS_ALLOW();

      /* Note: The arguments to MY_POLL may be evaluated multiple times. */

#ifdef BACKEND_USES_CFRUNLOOP
    if(pdb->go_cf)
    {
      /* Let the CF runloop do the wait; noteEvents() fills poll_fds
       * and event_count when the kqueue fd fires. */
      cf_timeout = next_timeout->tv_sec + (next_timeout->tv_usec / 1000000.0);

      pdb->event_count = 0;
      pdb->poll_fds = (poll_fds);
      
      CFFileDescriptorEnableCallBacks(pdb->fdref, kCFFileDescriptorReadCallBack);
      CFRunLoopAddSource(CFRunLoopGetCurrent(), pdb->source, kCFRunLoopDefaultMode);
      CFRunLoopRunInMode(kCFRunLoopDefaultMode, cf_timeout, true);

      i = pdb->event_count;
      pdb->poll_fds = NULL;
}
else
#endif /* BACKEND_USES_CFRUNLOOP */
      i = PDB_POLL(pdb->set, poll_timeout);

      IF_PD(fprintf(stderr, " => %d (timeout was: %d)\n", i, poll_timeout));

      THREADS_DISALLOW();
      check_threads_etc();
      me->may_need_wakeup = 0;
      INVALIDATE_CURRENT_TIME();
    }

    if (TYPEOF(me->after_callback) != T_INT)
      call_backend_monitor_cb (me, &me->after_callback);

    if (!i) {
      /* Timeout */
    } else if (i>0) {
      /* i events were returned; collect the affected fd boxes on a
       * temporary list, then fire their callbacks. */
      int num_active = i;
      struct fd_callback_box fd_list = {
	me, NULL, &fd_list,
	-1, 0, 0,
        0, 0, NULL
      };
      struct fd_callback_box *box;
      ONERROR free_fd_list;

      SET_ONERROR(free_fd_list, do_free_fd_list, &fd_list);

      done_something = 1;


#if 0
      /* First clear revents for all the fds.
       *
       * FIXME: This is done for paranoia reasons. If all code that
       *        messes with fds clears revents, this isn't needed.
       *
       * Note: This needs to be a separate loop, since kqueue sends
       *       read and write in two separate events.
       */
      while(i--)
      {
	int fd = PDB_GET_FD(poll_fds[i]);

#ifdef BACKEND_USES_KQUEUE
      if(poll_fds[i].filter == MY_POLLSIGNAL)
      {
        continue;
      }
#endif /* BACKEND_USES_KQUEUE */

	box = SAFE_GET_ACTIVE_BOX (me, fd);
	if (box) {
	  check_box (box, fd);
	  box->revents = 0;
          box->rflags = 0;
	}
      }
#endif

      /* Then flag the active events.
       */
      i = num_active;
      while(i--)
      {
	int fd = PDB_GET_FD(poll_fds[i]);

	pdb_describe_event(me, poll_fds[i]);

	if (!(box = SAFE_GET_ACTIVE_BOX (me, fd))) {
	  /* The box is no longer active. */
	  continue;
	}

#ifdef MY_POLLNVAL
	if(PDB_CHECK_ERROR_EVENT(poll_fds[i], MY_POLLNVAL))
	{
	  struct pollfd fds;
	  int ret;
	  /* NOTE: /dev/poll returns POLLNVAL for closed descriptors. */
	  IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLNVAL on %d\n",
			THR_NO, me->id, fd));
#ifdef PIKE_DEBUG
#ifdef HAVE_POLL
	  /* FIXME */

	  fds.fd=fd;
	  fds.events=POLLIN;
	  fds.revents=0;
	  ret=poll(&fds, 1,1 );
	  if(fds.revents & POLLNVAL)
	    Pike_fatal("Bad filedescriptor %d to poll().\n", fd);
#endif
	  /* Don't do anything further with this fd. */
	  continue;
#endif /* PIKE_DEBUG */
	}
#endif /* MY_POLLNVAL */

	check_box (box, fd);

	{
#ifdef PIKE_DEBUG
	  int handled = 0;
#endif /* PIKE_DEBUG */
	  if (PDB_CHECK_ERROR_EVENT(poll_fds[i], MY_POLLERR)) {
	    /* Errors are signalled on the first available callback. */
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLERR on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_ERROR;
	    /* Note that Linux pipe's signal close in the write-direction
	     * with POLLERR.
	     *
	     * FIXME: Signal on write-direction?
	     */
#ifdef BACKEND_USES_KQUEUE
	    /* kqueue signals errors as read or write events but
	     * with an additional error flag, so we must take care
	     * to not set any read/write bits if it's a sole error
	     * event. */
	    if (!poll_fds[i].data) goto next_fd;
#endif
	  }
	  if (PDB_CHECK_ERROR_EVENT(poll_fds[i], MY_POLLHUP)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLHUP on %d\n",
			  THR_NO, me->id, fd));
	    /* Linux signals close in the read-direction of pipes
	     * and fifos with POLLHUP. */
	    box->revents |= PIKE_BIT_FD_READ|PIKE_BIT_FD_READ_OOB;
	    /* For historical reasons we also signal on the write-drection. */
	    box->revents |= PIKE_BIT_FD_WRITE|PIKE_BIT_FD_WRITE_OOB;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLRDBAND)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLRDBAND on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_READ_OOB;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLIN)) {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: POLLRDNORM|POLLIN on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_READ;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLWRBAND)) {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: POLLWRBAND on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_WRITE_OOB;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLOUT)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLOUT on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_WRITE;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLFSEVENT)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLFSEVENT on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_FS_EVENT;
	    DO_IF_DEBUG(handled = 1);
	  }
#ifdef PIKE_DEBUG
	  if (!handled && PDB_GET_EVENTS(poll_fds[i])) {
	    fprintf(stderr, "[%d]BACKEND[%d]: fd %ld has revents 0x%08lx, "
		    "but hasn't been handled.\n", THR_NO, me->id,
		    (long)PDB_GET_FD(poll_fds[i]),
		    (long)PDB_GET_EVENTS(poll_fds[i]));
	    pdb_describe_event(me, poll_fds[i]);
	  }
#endif /* PIKE_DEBUG */
	}
	if (box->revents) {
	  if (!(box->revents & (box->events | PIKE_BIT_FD_ERROR))) {
	    /* Robustness paranoia; we've only received events that we
	     * aren't interested in. Unregister the unwanted events
	     * in case we are out of sync with the poll device.
	     * Otherwise we risk entering a busy loop.
	     */
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: Backend is out of sync for fd %d\n"
			  "[%d]BACKEND[%d]: Wanted: 0x%04x Received: 0x%04x\n",
			  THR_NO, me->id, fd,
			  THR_NO, me->id, box->events, box->revents));
	    pdb_update_fd_set(me, pdb, fd, box->revents|box->events,
			      box->events, box->flags);
	  }
	next_fd:
	  /* Hook in the box on the fd_list. */
	  if (!box->next) {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: hooking in box for fd %d\n",
			  THR_NO, me->id, fd));
	    box->next = fd_list.next;
	    fd_list.next = box;
	    if (box->ref_obj) add_ref(box->ref_obj);
	  } else {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: fd %d already in list.\n",
			  THR_NO, me->id, fd));
	  }
	}
      }

      /* Common code for all variants.
       *
       * Call callbacks for the active events.
       */
      if (backend_call_active_callbacks(&fd_list, me)) {
	CALL_AND_UNSET_ONERROR(free_fd_list);
	goto backend_round_done;
      }

      CALL_AND_UNSET_ONERROR(free_fd_list);

      /* Must be up-to-date for backend_do_call_outs. */
      INVALIDATE_CURRENT_TIME();
    }else{
      /* Poll failed; decide per errno whether it is fatal. */
      switch(errno)
      {
#ifdef __NT__
      default:
	Pike_fatal("Error in backend %d\n",errno);
	break;
#endif
	    
      case EINVAL:
	Pike_fatal("Invalid timeout to select().\n");
	break;
	    
#ifdef WSAEINTR
      case WSAEINTR:
#endif
      case EINTR:		/* ignore */
	break;
	    
#ifdef WSAEBADF
      case WSAEBADF:
#endif
#ifdef ENOTSOCK
      case ENOTSOCK:
#endif
#ifdef WSAENOTSOCK
      case WSAENOTSOCK:
#endif
      case EBADF:
	/* TODO: Fix poll version! */
	break;
	    
      }
    }

    {
      int call_outs_called =
	backend_do_call_outs(me); /* Will update current_time after calls. */
      if (call_outs_called)
	done_something = 1;
      if (call_outs_called < 0)
	goto backend_round_done;
    }

    call_callback(&me->backend_callbacks, NULL);

  backend_round_done:
    /* Report back via *timeout: -1 if nothing happened, otherwise the
     * wall-clock time spent in this pass. */
    if (!done_something)
      timeout->tv_sec = -1;
    else {
      struct timeval now;
      INACCURATE_GETTIMEOFDAY(&now);
      timeout->tv_sec = now.tv_sec;
      timeout->tv_usec = now.tv_usec;
      my_subtract_timeval (timeout, &start_time);
    }
	
    me->exec_thread = 0;
    UNSET_ONERROR (uwp);
  }
    
    #ifdef BACKEND_USES_CFRUNLOOP
      void cfTimerCallback(CFRunLoopTimerRef timer, void * info)
      {
        struct PollDeviceBackend_struct *me;
        struct timeval timeout;
        me = (struct PollDeviceBackend_struct *)info; 
        
        timeout.tv_sec = 0;
        timeout.tv_usec = 0;
    
        pdb_low_backend_once(me, &timeout);    
      }
    
  /* our external CFRunLoop has received events or timed out. We should
     do a once-through the runloop to find out if there's anything to do.
     Runloop observer: drops the Pike interpreter lock while the
     external runloop is running (entry) and re-acquires it before any
     timers/sources fire or on exit, where it also runs one
     zero-timeout backend pass.
     NOTE(review): kCFRunLoopBeforeWaiting/AfterWaiting are registered
     by init_external_cfrl() but there is no case (and no default) for
     them here -- presumably intentionally ignored; verify. */
  void cfObserverCallback(CFRunLoopObserverRef observer,
      CFRunLoopActivity activity, void* info)
  {
    struct timeval timeout;
    struct thread_state *cur_ts__;
    struct PollDeviceBackend_struct *me;
    me = (struct PollDeviceBackend_struct *)info; 

    switch(activity)
    {
      case kCFRunLoopEntry:
        /* release the interpreter lock. */
        me->gil_released = 1;
        {
          cur_ts__ = Pike_interpreter.thread_state;  
          me->thread_state = cur_ts__; /* for use later. */
          pike_threads_allow (cur_ts__ COMMA_DLOC);
          HIDE_GLOBAL_VARIABLES();
        }
        break;

      case kCFRunLoopBeforeTimers:
      case kCFRunLoopBeforeSources:
        /* gain the interpreter lock, if released. */
        if(me->gil_released)
        {
          cur_ts__ = me->thread_state;
          me->gil_released = 0;
          REVEAL_GLOBAL_VARIABLES();
          pike_threads_disallow (cur_ts__ COMMA_DLOC);
          me->thread_state = 0;
        }
        break;
      
      case kCFRunLoopExit:
        /* Re-acquire the lock if needed, then service the backend. */
        if(me->gil_released)
        {
          cur_ts__ = me->thread_state;
          me->gil_released = 0;
          REVEAL_GLOBAL_VARIABLES();
          pike_threads_disallow (cur_ts__ COMMA_DLOC);
          me->thread_state = 0;
        }
        timeout.tv_sec = 0;
        timeout.tv_usec = 0;
        pdb_low_backend_once(me, &timeout);
        break;
    }

  }
    #endif /* BACKEND_USES_CFRUNLOOP */
    
  /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time)
   *!   Perform one pass through the backend.
   *!
   *!   Calls any outstanding call-outs and non-blocking I/O
   *!   callbacks that are registred in this backend object.
   *!
   *! @param sleep_time
   *!   Wait at most @[sleep_time] seconds. The default when
   *!   unspecified or the integer @expr{0@} is no time limit.
   *!
   *! @returns
   *!   If the backend did call any callbacks or call outs then the
   *!   time spent in the backend is returned as a float. Otherwise
   *!   the integer @expr{0@} is returned.
   *!
   *! @seealso
   *!   @[Pike.DefaultBackend], @[main()]
   */
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
  {
    struct timeval timeout;	/* Got bogus gcc warning on timeout.tv_usec. */

    timeout.tv_sec = 0;
    timeout.tv_usec = 0;

    /* Split a float sleep_time into whole seconds and microseconds. */
    if (sleep_time && TYPEOF(*sleep_time) == PIKE_T_FLOAT) {
      timeout.tv_sec = (long) floor (sleep_time->u.float_number);
      timeout.tv_usec =
	(long) ((sleep_time->u.float_number - timeout.tv_sec) * 1e6);
    }
    else if (sleep_time && TYPEOF(*sleep_time) == T_INT &&
	     sleep_time->u.integer) {
      /* Only the integer 0 is accepted; any other int is an error. */
      SIMPLE_BAD_ARG_ERROR("`()", 1, "float|int(0..0)");
    }
    else
      /* No argument (or 0): negative tv_sec means no time limit. */
      timeout.tv_sec = -1;

    /* On return, timeout holds -1 (nothing done) or the time spent. */
    pdb_low_backend_once(THIS, &timeout);

    pop_n_elems (args);
    if (timeout.tv_sec < 0)
      push_int (0);
    else
      push_float (DO_NOT_WARN ((FLOAT_TYPE)
			       (DO_NOT_WARN ((double) timeout.tv_sec) +
				DO_NOT_WARN ((double) timeout.tv_usec) / 1e6)));
  }
    
    #ifdef BACKEND_USES_CFRUNLOOP
    /*! @decl int enable_core_foundation(int(0..1) enable)
     *!   On systems with CoreFoundation (OSX, iOS, etc), use CoreFoundation
     *!   to poll for events. This enables system level technologies that rely
     *!   on CoreFoundation Runloops to function properly.
     *!
     *!  @param enable
     *!    enable or disable this functionality
     *!
     *!  @returns
     *!    the previous value of this setting.
     *!
     */
    PIKEFUN int enable_core_foundation(int enable)
    {
      int x = THIS->go_cf;
    
      if(enable && !THIS->go_cf)
      {
        THIS->go_cf = 1;
        init_cf(THIS, THIS->set);
      }
      else if(!enable && THIS->go_cf)
      {
        THIS->go_cf = 0;
        if(THIS->external_run)
          exit_external_cfrl(THIS);
        exit_cf(THIS);
      }
      pop_stack();
    
      push_int(x);
    }
    
    /*! @decl int query_core_foundation_enabled()
     *!
     *! On systems with CoreFoundation (OSX, iOS, etc), indicate whether
     *! CoreFoundation is being used by this backend to poll for events.
     *!
     *! @returns
     *! the current state of CoreFoundation polling: 1=enabled, 0=disabled
     *!
     */
    PIKEFUN int query_core_foundation_enabled()
    {
      int x = THIS->go_cf;
    
      push_int(x);
      return;
    }
    
    /*! @decl int enable_external_runloop(int(0..1) enable)
     *!   On systems with CoreFoundation (OSX, iOS, etc), delegate 
     *!   running of the Pike Backend to the main runloop of the 
     *!   process (such as a Cocoa application's NSRunLoop). 
     *!
     *!   Enabling the external runloop allows Pike callouts and 
     *!   callback-based I/O to function normally while greatly reducing 
     *!   cpu utilization compared to running the external runloop
     *!   manually.
     *!
     *!  @param enable
     *!    enable or disable this functionality
     *!
     *!  @returns
     *!    the previous value of this setting.
     *!
     */
    PIKEFUN int enable_external_runloop(int enable)
    {
      int x = THIS->external_run;
    
      if(enable && !THIS->external_run)
      {
        THIS->external_run = 1;
        
        if(!THIS->go_cf)
          init_cf(THIS, THIS->set);
          
        init_external_cfrl(THIS, THIS->set);
      }
      else if(!enable && THIS->external_run)
      {
        THIS->external_run = 0;
        exit_external_cfrl(THIS);
      }
      pop_stack();
    
      push_int(x);
    }
    
    #endif /* BACKEND_USES_CFRUNLOOP */
    
    
  EXTRA
  {
    /* Compute the byte offset from this class' storage to the
     * inherited Backend storage, so INIT can locate it at runtime. */
    pdb_offset = Pike_compiler->new_program->inherits[1].storage_offset -
      Pike_compiler->new_program->inherits[0].storage_offset;

    /* /dev/poll and kqueue fds get invalidated at fork. */
    dmalloc_accept_leak(add_to_callback(&fork_child_callback,
					reopen_all_pdb_backends, NULL, NULL));

    /* Advertise compile-time capabilities as class constants. */
#ifdef BACKEND_USES_CFRUNLOOP
    add_integer_constant("HAVE_CORE_FOUNDATION", 1, 0);
#endif /* BACKEND_USES_CFRUNLOOP */

#ifdef BACKEND_USES_KQUEUE
    add_integer_constant("HAVE_KQUEUE", 1, 0);
#endif /* BACKEND_USES_KQUEUE */

  }
    
  INIT
  {
    /* Locate the inherited Backend storage via the offset computed in
     * EXTRA and cache the pointer in THIS->backend. */
    struct Backend_struct *me =
      THIS->backend = (struct Backend_struct *)(((char *)THIS) + pdb_offset);

    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: Registering device backend...\n",
		    THR_NO, me->id));

    /* Route fd set changes from the generic backend to our handler. */
    me->update_fd_set_handler = (update_fd_set_handler_fn *) pdb_update_fd_set;
    me->handler_data = THIS;

    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: Registering backend...\n",
		  THR_NO, me->id));
    register_pdb_backend(THIS);

    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: Opening poll device...\n",
		  THR_NO, me->id));
    /* THIS->set is the poll device fd (e.g. /dev/poll or kqueue). */
    if ((THIS->set = OPEN_POLL_DEVICE(THIS)) < 0) {
      Pike_error("Failed to open poll device (errno:%d)\n", errno);
    }
    /* The poll device fd must not leak into exec'ed children. */
    set_close_on_exec(THIS->set, 1);
  }
      
  EXIT
    gc_trivial;
  {
    struct Backend_struct *me = THIS->backend;

    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: Closing poll device...\n",
		    THR_NO, me->id));

    /* Tear down CoreFoundation state before closing the poll device:
     * external runloop first (if active), then the CF state. */
#ifdef BACKEND_USES_CFRUNLOOP 
    if(THIS->external_run)
      exit_external_cfrl(THIS);
    exit_cf(THIS);
#endif /* BACKEND_USES_CFRUNLOOP */

    /* Close the poll device fd opened in INIT (if it was opened). */
    if (THIS->set >= 0)
      fd_close(THIS->set);

    unregister_pdb_backend(THIS);
  }
    }
    
    /*! @endclass
     */
    
    #endif /* BACKEND_USES_POLL_DEVICE || BACKEND_USES_KQUEUE */
    
    #ifdef BACKEND_USES_CFRUNLOOP
    static void check_set_timer(struct timeval tmp)
    {
      {
        /* register a timer if the backend is using external cfrl. */
        struct external_variable_context loc;
        struct PollDeviceBackend_struct * pdb = NULL;
        char * _st = NULL;
        loc.o = Pike_fp->current_object;
        loc.parent_identifier = 0;
        loc.inherit = Pike_fp->context;
        
        find_external_context(&loc, 1);
        
        if (!loc.o->prog)
        {
          Pike_error ("Cannot access storage of destructed parent object.\n");
        }
        
        _st = get_storage(loc.o, PollDeviceBackend_program);
    
        pdb = (struct PollDeviceBackend_struct *)_st;
    
        if(pdb && pdb->external_run)
        {
          CFRunLoopTimerContext context = {0, pdb, NULL, NULL, NULL};
          CFRunLoopTimerRef timer; 
          CFTimeInterval when = 0.0;
    
          when = (CFTimeInterval)tmp.tv_sec + (1.0E-6 * (CFTimeInterval) tmp.tv_usec);
          timer = CFRunLoopTimerCreate(NULL, CFAbsoluteTimeGetCurrent() + when,0, 0, 0, &cfTimerCallback, &context);
    
          if(timer)
          {
            /* set the timer. */
            CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopDefaultMode);
          }
          else
          {
            Pike_error("ERROR: unable to create run loop timer!\n");
          }
        }
      }
    }
    #endif /* BACKEND_USES_CFRUNLOOP */
    
    #ifdef HAVE_POLL
    
    /*! @class PollBackend
     *! @inherit __Backend
     *!
     *! @[Backend] implemented with @tt{poll(2)@} (SVr4, POSIX).
     *!
     *! @seealso
     *!   @[Backend]
     */
    PIKECLASS PollBackend
    {
      INHERIT Backend;
    
      /* Helpers to find the above inherit. */
      static ptrdiff_t pb_offset = 0;
      CVAR struct Backend_struct *backend;
    
      /* 
       * POLL/SELECT fd sets
       */
      CVAR struct pb_selectors set;
      CVAR struct pb_selectors active_set;
    
      DECLARE_STORAGE
    
      /*
       * FD set handling
       */
    
  /* Translate a change in Pike fd event bits (PIKE_BIT_FD_*) for fd
   * into additions/removals in pb's poll() event set, and wake the
   * backend thread when new events were added. */
  static void pb_update_fd_set (struct Backend_struct *me,
				struct PollBackend_struct *pb, int fd,
				int old_events, int new_events, int UNUSED(flags))
  {
    /* Bits that differ between the old and new event masks. */
    int changed_events = old_events ^ new_events;

    IF_PD(fprintf (stderr, "[%d]BACKEND[%d]: pb_update_fd_set(.., %d, %d, %d):\n",
		   THR_NO, me->id, fd, old_events, new_events));
    

    if (changed_events) {

      if (changed_events & PIKE_BIT_FD_READ) {
	if (new_events & PIKE_BIT_FD_READ) {
	  pb_MY_FD_SET(&pb->set, fd, MY_POLLIN);
	}
	else {
	  pb_MY_FD_CLR(&pb->set, fd, MY_POLLIN);
	  /* Keep OOB-read polling alive even though normal read went away. */
	  if (new_events & PIKE_BIT_FD_READ_OOB)
	  {
	    pb_MY_FD_SET(&pb->set, fd, MY_POLLRDBAND);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_READ_OOB) {
	if (new_events & PIKE_BIT_FD_READ_OOB)
	  pb_MY_FD_SET(&pb->set, fd, MY_POLLRDBAND);
	else {
	  if (!(new_events & PIKE_BIT_FD_READ)) {
	    pb_MY_FD_CLR(&pb->set, fd, MY_POLLRDBAND);
	  } else {
	    /* Normal read still wanted: drop only the RDBAND part. */
	    pb_MY_FD_CLR_RDBAND(&pb->set, fd);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_WRITE) {
	if (new_events & PIKE_BIT_FD_WRITE) {
	  pb_MY_FD_SET(&pb->set, fd, MY_POLLOUT);
	}
	else {
	  /* OOB write implies POLLOUT stays set. */
	  if (!(new_events & PIKE_BIT_FD_WRITE_OOB)) {
	    pb_MY_FD_CLR(&pb->set, fd, MY_POLLOUT);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_WRITE_OOB) {
	if (new_events & PIKE_BIT_FD_WRITE_OOB)
	  pb_MY_FD_SET(&pb->set, fd, MY_POLLWRBAND);
	else {
	  if (!(new_events & PIKE_BIT_FD_WRITE)) {
	    /* NOTE(review): sets MY_POLLWRBAND above but clears
	     * MY_POLLWREXCEPT here -- presumably the two macros cover
	     * the same bits on all platforms; confirm. */
	    pb_MY_FD_CLR(&pb->set, fd, MY_POLLWREXCEPT);
	  } else {
	    pb_MY_FD_CLR_WRBAND(&pb->set, fd);
	  }
	}
      }

      if (new_events & ~old_events)
	/* New events were added. */
	backend_wake_up_backend (me);
    }
  }
    
    #ifdef PIKE_DEBUG
    
  /* Debug-only sanity check: verify that every fd in pb's poll set is
   * registered in fd_map for this backend and still refers to a live
   * descriptor; Pike_fatal()s on inconsistency. */
  static void pb_backend_do_debug(struct Backend_struct *me,
				  struct PollBackend_struct *pb)
    {
      int e;

      /* FIXME: OOB? */
      for(e=0;e<pb->set.num_in_poll;e++)
      {
	PIKE_STAT_T tmp;
	int ret;
	int fd = pb->set.poll_fds[e].fd;

	if (fd >= fd_map_size || fd_map[fd] != me)
	  Pike_fatal ("Isn't referenced from fd_map for fd %d at %d in poll set.\n",
		      fd, e);

	/* fstat() just to probe the fd's validity; retry on EINTR. */
	do {
	  ret=fd_fstat(fd, &tmp);
	  /* FIXME: Perhaps do check_threads_etc() here? */
	}while(ret < 0 && errno == EINTR);

	if(ret<0)
	{
	  switch(errno)
	  {
	    case EBADF:
	      Pike_fatal("Backend filedescriptor %d is bad.\n", fd);
	      break;
	    case ENOENT:
	      Pike_fatal("Backend filedescriptor %d is not.\n", fd);
	      break;
	  }
	}
      }
    }
    
    #endif	/* PIKE_DEBUG */
    
      /* Mapping of events to flags and callbacks.
       *
       * Event	select	poll		kqueue			callback
       *
       * data_in	read	POLLIN		EVFILT_READ		READ
       *			POLLRDNORM	EVFILT_READ[EOF]      
       *
       * data_out	write	POLLOUT		EVFILT_WRITE		WRITE
       *			POLLWRNORM
       *
       * oob_in	except	POLLPRI		(EVFILT_READ)		READ_OOB
       *			POLLRDBAND
       *
       * oob_out	write	POLLWRBAND	(EVFILT_WRITE)		WRITE_OOB
       *
       * close_in	read	POLLIN		EVFILT_READ[EOF]	READ
       *
       * close_out	write	POLLHUP		EVFILT_WRITE[EOF]	WRITE
       *								>WRITE_OOB
       *
       * conn_ok	write	POLLOUT		EVFILT_WRITE		WRITE
       *
       * conn_fail	read	POLLIN		EVFILT_READ[EOF]	READ
       *		except						(READ_OOB)
       *
       * new_conn	read	POLLIN		EVFILT_READ		READ
       *
       * sock_err	except	POLLERR		EVFILT_READ[ERR]	ERROR
       *								>READ
       *
       * sock_err	except	POLLERR		EVFILT_WRITE[ERR]	ERROR
       *								>WRITE
       * fs_event	NONE	NONE		EVFILT_VNODE	FSEVENT
       */
    
    
  /* Run one iteration of the poll()-based backend: wait for fd events
   * (bounded by the backend's next_timeout), dispatch fd callbacks,
   * call outs and backend callbacks.
   *
   * A negative tv_sec in timeout turns it off. If it ran until the
   * timeout without calling any callbacks or call outs (except those
   * on backend_callbacks) then tv_sec will be set to -1. Otherwise it
   * will be set to the time spent. */
  static void pb_low_backend_once(struct PollBackend_struct *pb,
				  struct timeval *timeout)
  {
    ONERROR uwp;
    int i, done_something = 0;
    struct timeval start_time = *timeout;
    struct Backend_struct *me = pb->backend;
#ifdef DECLARE_POLL_EXTRAS
    /* Declare any extra variables needed by MY_POLL(). */
    DECLARE_POLL_EXTRAS;
#endif /* DECLARE_POLL_EXTRAS */

    /* NOTE(review): THIS->backend here vs pb->backend on the next
     * line -- presumably equivalent in this context; confirm. */
    SET_ONERROR(uwp, low_backend_cleanup, THIS->backend);
    low_backend_once_setup(pb->backend, &start_time);

    if (TYPEOF(me->before_callback) != T_INT)
      call_backend_monitor_cb (me, &me->before_callback);

    {
      int poll_timeout;
      struct timeval *next_timeout = &pb->backend->next_timeout;

      me->may_need_wakeup = 1;

      /* Convert next_timeout (timeval) to the millisecond int that
       * poll() takes, clamping against overflow. */
      if (next_timeout->tv_sec >= 100000000)
	/* Take this as waiting forever. */
	poll_timeout = -1;
      else if(next_timeout->tv_sec < 0)
	poll_timeout = 0;
      else if(next_timeout->tv_sec > (INT_MAX/1002))
	poll_timeout = INT_MAX;
      else
	poll_timeout = (next_timeout->tv_sec*1000) +
	  next_timeout->tv_usec/1000;

      /* Poll on a snapshot so callbacks may mutate pb->set freely. */
      pb_copy_selectors(& pb->active_set, &pb->set);

      IF_PD(fprintf (stderr, "[%d]BACKEND[%d]: Doing poll on fds:\n",
		     THR_NO, me->id));
#ifdef POLL_DEBUG
      {
	int i;
	for (i = 0; i < pb->active_set.num_in_poll; i++) {
	  fprintf (stderr,
		   "[%d]BACKEND[%d]:   fd %4d: %-4s %-5s %-8s %-9s: 0x%04x\n",
		   THR_NO, me->id,
		   pb->active_set.poll_fds[i].fd,
		   pb->active_set.poll_fds[i].events & (POLLRDNORM|POLLIN) ? "read" : "",
		   pb->active_set.poll_fds[i].events & POLLOUT ? "write" : "",
		   pb->active_set.poll_fds[i].events & POLLRDBAND ? "read_oob" : "",
		   pb->active_set.poll_fds[i].events & POLLWRBAND ? "write_oob" : "",
		   pb->active_set.poll_fds[i].events);
	}
      }
      fprintf(stderr, "[%d]BACKEND[%d]: poll(%p, %d, %d)...", THR_NO, me->id,
	      pb->active_set.poll_fds,
	      pb->active_set.num_in_poll,
	      poll_timeout);
#endif /* POLL_DEBUG */

      /* Release the interpreter lock while blocked in poll(). */
      check_threads_etc();
      THREADS_ALLOW();

      /* Note: The arguments to MY_POLL may be evaluated multiple times. */
      i = PB_POLL(pb->active_set, poll_timeout);

      IF_PD(fprintf(stderr, " => %d\n", i));

      THREADS_DISALLOW();
      check_threads_etc();
      me->may_need_wakeup = 0;
      INVALIDATE_CURRENT_TIME();
    }

    if (TYPEOF(me->after_callback) != T_INT)
      call_backend_monitor_cb (me, &me->after_callback);

    if (!i) {
      /* Timeout */
    } else if (i>0) {
      /* i > 0: that many fds have events pending. */
      /* NOTE(review): num_active is never read below -- looks vestigial. */
      int num_active = i;
      /* Sentinel head of a circular list of boxes with active events. */
      struct fd_callback_box fd_list = {
	NULL, NULL, NULL,
	-1, 0, 0,
	0, 0, NULL,
      };
      struct fd_callback_box *box;
      ONERROR free_fd_list;

      fd_list.backend = me;
      fd_list.next = &fd_list;

      SET_ONERROR(free_fd_list, do_free_fd_list, &fd_list);

      done_something = 1;


      /* First clear revents for all the fds.
       *
       * FIXME: This is done for paranoia reasons. If all code that
       *        messes with fds clears revents, this isn't needed.
       *
       * Note: this needs to be a separate loop, since kqueue sends
       *       read and write in two separate events.
       */
      for(i=0; i<pb->active_set.num_in_poll; i++)
      {
	int fd = PB_GET_FD(pb->active_set.poll_fds[i]);
	box = SAFE_GET_ACTIVE_BOX (me, fd);
	if (box) {
	  check_box (box, fd);
	  box->revents = 0;
          box->flags = 0;
	}
      }

      /* Then flag the active events.
       */
      for(i=0; i<pb->active_set.num_in_poll; i++)
      {
	int fd = PB_GET_FD(pb->active_set.poll_fds[i]);

	if (!(box = SAFE_GET_ACTIVE_BOX (me, fd))) {
	  /* The box is no longer active. */
	  continue;
	}

#ifdef MY_POLLNVAL
	if(PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLNVAL))
	{
	  struct pollfd fds;
	  int ret;
	  /* NOTE: /dev/poll returns POLLNVAL for closed descriptors. */
	  IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLNVAL on %d\n",
			THR_NO, me->id, fd));
#ifdef PIKE_DEBUG
	  /* FIXME */

	  /* Re-probe the single fd to distinguish a genuinely bad fd
	   * from a transient condition. */
	  fds.fd=fd;
	  fds.events=POLLIN;
	  fds.revents=0;
	  ret=poll(&fds, 1,1 );
	  if(fds.revents & POLLNVAL)
	    Pike_fatal("Bad filedescriptor %d to poll().\n", fd);
	  /* Don't do anything further with this fd. */
	  continue;
#endif /* PIKE_DEBUG */
	}
#endif /* MY_POLLNVAL */

	check_box (box, fd);

#if 0
    if(PDB_CHECK_EVENT(poll_fds[i], MY_POLLSIGNAL))
    {
      fprintf(stderr, "SIGNAL EVENT RECEIVED!\n");
    }
#endif

	/* Translate poll revents bits into PIKE_BIT_FD_* on the box. */
	{
#ifdef PIKE_DEBUG
	  int handled = 0;
#endif /* PIKE_DEBUG */
	  if (PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLERR)) {
	    /* Errors are signalled on the first available callback. */
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLERR on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_ERROR;
	  }

	  if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLRDBAND)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLRDBAND on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_READ_OOB;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLIN)) {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: POLLRDNORM|POLLIN on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_READ;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLWRBAND) ||
	      PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLHUP)) {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: POLLWRBAND|POLLHUP on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_WRITE_OOB;
	    DO_IF_DEBUG(handled = 1);
	  }
	  if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLOUT) ||
	      PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLHUP)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLOUT|POLLHUP on %d\n",
			  THR_NO, me->id, fd));
	    box->revents |= PIKE_BIT_FD_WRITE;
	    DO_IF_DEBUG(handled = 1);
	  }
	  /* NOTE(review): this stanza uses PDB_CHECK_EVENT/PDB_GET_FLAGS
	   * and a bare poll_fds[i] (unlike the PB_* accessors and
	   * pb->active_set qualification used everywhere else here) --
	   * looks copy-pasted from the PollDeviceBackend variant;
	   * confirm MY_POLLFSEVENT/poll_fds resolve in this scope. */
	  if (PDB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLFSEVENT)) {
	    IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: POLLFSEVENT on %d\n",
			  THR_NO, me->id, fd));
	    box->rflags = PDB_GET_FLAGS(poll_fds[i]);
	    box->revents |= PIKE_BIT_FD_FS_EVENT;
	    DO_IF_DEBUG(handled = 1);
	  }
#ifdef PIKE_DEBUG
	  if (!handled && PB_GET_EVENTS(pb->active_set.poll_fds[i])) {
	    fprintf(stderr, "[%d]BACKEND[%d]: fd %ld has revents 0x%08lx, "
		    "but hasn't been handled.\n", THR_NO, me->id,
		    (long)PB_GET_FD(pb->active_set.poll_fds[i]),
		    (long)PB_GET_EVENTS(pb->active_set.poll_fds[i]));
	    /* pdb_describe_event(me, pb->active_set.poll_fds[i]); */
	  }
#endif /* PIKE_DEBUG */
	}
	if (box->revents) {
	  /* NOTE(review): no goto to next_fd within this function --
	   * the label looks vestigial; confirm before removing. */
	next_fd:
	  /* Hook in the box on the fd_list. */
	  if (!box->next) {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: hooking in box for fd %d\n",
			  THR_NO, me->id, fd));
	    box->next = fd_list.next;
	    fd_list.next = box;
	    /* Hold a ref while the box sits on the list. */
	    if (box->ref_obj) add_ref(box->ref_obj);
	  } else {
	    IF_PD(fprintf(stderr,
			  "[%d]BACKEND[%d]: fd %d already in list.\n",
			  THR_NO, me->id, fd));
	  }
	}
      }

      /* Common code for all variants.
       *
       * Call callbacks for the active events.
       */
      if (backend_call_active_callbacks(&fd_list, me)) {
	CALL_AND_UNSET_ONERROR(free_fd_list);
	goto backend_round_done;
      }

      CALL_AND_UNSET_ONERROR(free_fd_list);

      /* Must be up-to-date for backend_do_call_outs. */
      INVALIDATE_CURRENT_TIME();
    }else{
      /* i < 0: poll() failed. */
      switch(errno)
      {
#ifdef __NT__
      default:
	Pike_fatal("Error in backend %d\n",errno);
	break;
#endif
	    
      case EINVAL:
	Pike_fatal("Invalid timeout to select().\n");
	break;
	    
#ifdef WSAEINTR
      case WSAEINTR:
#endif
      case EINTR:		/* ignore */
	break;
	    
#ifdef WSAEBADF
      case WSAEBADF:
#endif
#ifdef ENOTSOCK
      case ENOTSOCK:
#endif
#ifdef WSAENOTSOCK
      case WSAENOTSOCK:
#endif
      case EBADF:
	/* TODO: Fix poll version! */
	break;
	    
      }
    }

    {
      int call_outs_called =
	backend_do_call_outs(me); /* Will update current_time after calls. */
      if (call_outs_called)
	done_something = 1;
      if (call_outs_called < 0)
	goto backend_round_done;
    }

    call_callback(&me->backend_callbacks, NULL);

  backend_round_done:
    /* Report back through timeout: -1 for an idle round, else the
     * wall-clock time this round took. */
    if (!done_something)
      timeout->tv_sec = -1;
    else {
      struct timeval now;
      INACCURATE_GETTIMEOFDAY(&now);
      timeout->tv_sec = now.tv_sec;
      timeout->tv_usec = now.tv_usec;
      my_subtract_timeval (timeout, &start_time);
    }

    me->exec_thread = 0;
    UNSET_ONERROR (uwp);
  }
    
  /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time)
   *!   Perform one pass through the backend.
   *!
   *!   Calls any outstanding call-outs and non-blocking I/O
   *!   callbacks that are registered in this backend object.
   *!
   *! @param sleep_time
   *!   Wait at most @[sleep_time] seconds. The default when
   *!   unspecified or the integer @expr{0@} is no time limit.
   *!
   *! @returns
   *!   If the backend did call any callbacks or call outs then the
   *!   time spent in the backend is returned as a float. Otherwise
   *!   the integer @expr{0@} is returned.
   *!
   *! @seealso
   *!   @[Pike.DefaultBackend], @[main()]
   */
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
  {
    struct timeval timeout;	/* Every field is assigned in each branch below. */

    /* Decode the optional sleep_time argument into a timeval:
     * float => finite timeout, 0/void => tv_sec = -1 (no limit). */
    if (sleep_time && TYPEOF(*sleep_time) == PIKE_T_FLOAT) {
      timeout.tv_sec = (long) floor (sleep_time->u.float_number);
      timeout.tv_usec =
	(long) ((sleep_time->u.float_number - timeout.tv_sec) * 1e6);
    }
    else if (sleep_time && TYPEOF(*sleep_time) == T_INT &&
	     sleep_time->u.integer) {
      /* Only the integer 0 is accepted; any other int is a type error. */
      SIMPLE_BAD_ARG_ERROR("`()", 1, "float|int(0..0)");
    }
    else
    {
      timeout.tv_sec = -1;
      timeout.tv_usec = 0;
    }

    /* On return, timeout holds the time spent, or tv_sec < 0 when no
     * callbacks or call outs were run (see pb_low_backend_once). */
    pb_low_backend_once(THIS, &timeout);

    pop_n_elems (args);
    if (timeout.tv_sec < 0)
      push_int (0);
    else
      push_float (DO_NOT_WARN ((FLOAT_TYPE)
			       (DO_NOT_WARN ((double) timeout.tv_sec) +
				DO_NOT_WARN ((double) timeout.tv_usec) / 1e6)));
  }
    
  EXTRA
  {
    /* Compute the byte offset from this class' storage to the
     * inherited Backend storage, so INIT can locate it at runtime. */
    pb_offset = Pike_compiler->new_program->inherits[1].storage_offset -
      Pike_compiler->new_program->inherits[0].storage_offset;

    /* Dump the platform's poll flag values when poll debug is on. */
    IF_PD(fprintf(stderr,
		  "MY_POLLIN:       0x%04x\n"
		  "MY_POLLOUT:      0x%04x\n"
		  "MY_POLLEXCEPT:   0x%04x\n"
		  "MY_POLLRDBAND:   0x%04x\n"
		  "MY_POLLWREXCEPT: 0x%04x\n"
		  "MY_POLLWRBAND:   0x%04x\n",
		  MY_POLLIN, MY_POLLOUT,
		  MY_POLLEXCEPT, MY_POLLRDBAND,
		  MY_POLLWREXCEPT, MY_POLLWRBAND));
  }
    
  INIT
  {
    /* Locate the inherited Backend storage via the offset computed in
     * EXTRA and cache the pointer in THIS->backend. */
    struct Backend_struct *me =
      THIS->backend = (struct Backend_struct *)(((char *)THIS) + pb_offset);

    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: init generic\n",
		    THR_NO, me->id));

#ifdef PIKE_DEBUG
    me->debug_handler = (debug_handler_fn *) pb_backend_do_debug;
#endif
    /* Route fd set changes from the generic backend to our handler. */
    me->update_fd_set_handler = (update_fd_set_handler_fn *) pb_update_fd_set;
    me->handler_data = THIS;

    /* Start with empty poll fd arrays; they grow on demand. */
    THIS->set.poll_fds=0;
    THIS->set.poll_fd_size=0;
    THIS->set.num_in_poll=0;

    THIS->active_set.poll_fds=0;
    THIS->active_set.poll_fd_size=0;
    THIS->active_set.num_in_poll=0;
  }
      
      EXIT
        gc_trivial;
      {
        struct Backend_struct *me = THIS->backend;
        int e;
    
        IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: exit generic backend\n",
    		    THR_NO, me->id));
    
        if (THIS->set.poll_fds) {
          free(THIS->set.poll_fds);
          THIS->set.poll_fds = NULL;
          THIS->set.poll_fd_size = 0;
          THIS->set.num_in_poll = 0;
        }
        if (THIS->active_set.poll_fds) {
          free(THIS->active_set.poll_fds);
          THIS->active_set.poll_fds = NULL;
          THIS->active_set.poll_fd_size = 0;
          THIS->active_set.num_in_poll = 0;
        }
      }
    }
    
    /*! @endclass
     */
    
    #else /* HAVE_POLL */
    /*! @class SelectBackend
     *! @inherit __Backend
     *!
     *! Backend based on the classic @tt{select(2)@} system call from BSD.
     */
    PIKECLASS SelectBackend
    {
      INHERIT Backend;
    
      /* Helpers to find the above inherit. */
      static ptrdiff_t sb_offset = 0;
      CVAR struct Backend_struct *backend;
    
      /* 
       * POLL/SELECT fd sets
       */
      CVAR struct sb_selectors set;
      CVAR struct sb_active_selectors active_set;
    
      DECLARE_STORAGE
    
      /*
       * FD set handling
       */
    
  /* Translate a change in Pike fd event bits (PIKE_BIT_FD_*) for fd
   * into additions/removals in sb's select() fd sets, and wake the
   * backend thread when new events were added. */
  static void sb_update_fd_set (struct Backend_struct *me,
				struct SelectBackend_struct *sb, int fd,
				int old_events, int new_events)
  {
    /* Bits that differ between the old and new event masks. */
    int changed_events = old_events ^ new_events;

    IF_PD(fprintf (stderr, "[%d]BACKEND[%d]: sb_update_fd_set(.., %d, %d, %d):\n",
		   THR_NO, me->id, fd, old_events, new_events));
    

    if (changed_events) {
      if (changed_events & PIKE_BIT_FD_READ) {
	if (new_events & PIKE_BIT_FD_READ) {
	  sb_MY_FD_SET(&sb->set, fd, MY_READSET);
	  /* Got to enable the exception set to get errors (at least
	   * according to POSIX). */
	  sb_MY_FD_SET(&sb->set, fd, MY_EXCEPTSET);
	}
	else {
	  sb_MY_FD_CLR(&sb->set, fd, MY_READSET);
	  if (!(new_events & PIKE_BIT_FD_READ_OOB) &&
	      !(new_events & PIKE_BIT_FD_WRITE))
	    /* Exceptions might cause calls to read, read_oob and write. */
	    sb_MY_FD_CLR(&sb->set, fd, MY_EXCEPTSET);
	}
      }

      if (changed_events & PIKE_BIT_FD_READ_OOB) {
	if (new_events & PIKE_BIT_FD_READ_OOB)
	  /* select() signals OOB data via the exception set. */
	  sb_MY_FD_SET(&sb->set, fd, MY_EXCEPTSET);
	else {
	  if (!(new_events & PIKE_BIT_FD_READ)) {
	    if (!(new_events & PIKE_BIT_FD_WRITE))
	      /* Exceptions might cause calls to read, read_oob and write. */
	      sb_MY_FD_CLR(&sb->set, fd, MY_EXCEPTSET);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_WRITE) {
	if (new_events & PIKE_BIT_FD_WRITE) {
	  sb_MY_FD_SET(&sb->set, fd, MY_WRITESET);
	  /* Got to enable the exception set to get errors (at least
	   * according to POSIX). */
	  sb_MY_FD_SET(&sb->set, fd, MY_EXCEPTSET);
	}
	else {
	  if (!(new_events & PIKE_BIT_FD_WRITE_OOB)) {
	    sb_MY_FD_CLR(&sb->set, fd, MY_WRITESET);
	    if (!(new_events & PIKE_BIT_FD_READ) &&
		!(new_events & PIKE_BIT_FD_READ_OOB))
	      /* Exceptions might cause calls to read, read_oob and write. */
	      sb_MY_FD_CLR(&sb->set, fd, MY_EXCEPTSET);
	  }
	}
      }

      if (changed_events & PIKE_BIT_FD_WRITE_OOB) {
	if (new_events & PIKE_BIT_FD_WRITE_OOB)
	  /* OOB writes share the ordinary write set under select(). */
	  sb_MY_FD_SET(&sb->set, fd, MY_WRITESET);
	else {
	  if (!(new_events & PIKE_BIT_FD_WRITE)) {
	    sb_MY_FD_CLR(&sb->set, fd, MY_WRITESET);
	  }
	}
      }

      if (new_events & ~old_events)
	/* New events were added. */
	backend_wake_up_backend (me);
    }
  }
    
    #ifdef PIKE_DEBUG
    
  /* Debug-only sanity check: verify that every fd present in any of
   * sb's select sets is registered in fd_map for this backend and
   * still refers to a live descriptor; Pike_fatal()s on inconsistency. */
  static void sb_backend_do_debug(struct Backend_struct *me,
				  struct SelectBackend_struct *sb)
    {
      int e;
      PIKE_STAT_T tmp;

      /* FIXME: OOB? */
      for(e=0;e<=sb->set.max_fd;e++)
      {
	if(my_FD_ISSET(e, sb->set.sets + MY_READSET)
	   || my_FD_ISSET(e, sb->set.sets + MY_WRITESET)
	   || my_FD_ISSET(e, sb->set.sets + MY_EXCEPTSET)
	  )
	{
	  int ret;

	  if (e >= fd_map_size || fd_map[e] != me)
	    Pike_fatal ("Isn't referenced from fd_map for fd %d in select set.\n", e);

	  /* fstat() just to probe the fd's validity; retry on EINTR. */
	  do {
	    ret = fd_fstat(e, &tmp);
	    /* FIXME: Perhaps do check_threads_etc() here? */
	  }while(ret < 0 && errno == EINTR);

	  if(ret<0)
	  {
	    switch(errno)
	    {
	      case EBADF:
		Pike_fatal("Backend filedescriptor %d is bad.\n",e);
		break;
	      case ENOENT:
		Pike_fatal("Backend filedescriptor %d is not.\n",e);
		break;
	    }
	  }
	}
      }
    }
    
    #endif	/* PIKE_DEBUG */
    
      /* A negative tv_sec in timeout turns it off. If it ran until the
       * timeout without calling any callbacks or call outs (except those
       * on backend_callbacks) then tv_sec will be set to -1. Otherwise it
       * will be set to the time spent. */
      static void sb_low_backend_once(struct SelectBackend_struct *sb,
    				  struct timeval *timeout)
      {
        ONERROR uwp;
        int i, done_something = 0;
        struct timeval start_time = *timeout;
        struct Backend_struct *me = sb->backend;
    #ifdef DECLARE_POLL_EXTRAS
        /* Declare any extra variables needed by MY_POLL(). */
        DECLARE_POLL_EXTRAS;
    #endif /* DECLARE_POLL_EXTRAS */
    
        SET_ONERROR(uwp, low_backend_cleanup, THIS->backend);
        low_backend_once_setup(sb->backend, &start_time);
    
        if (TYPEOF(me->before_callback) != T_INT)
          call_backend_monitor_cb (me, &me->before_callback);
    
        {
          struct timeval poll_timeout;
          struct timeval *next_timeout = &sb->backend->next_timeout;
    
          me->may_need_wakeup = 1;
    
          poll_timeout = *next_timeout;
    
          sb_copy_selectors(& sb->active_set, &sb->set);
    
          IF_PD(fprintf (stderr, "[%d]BACKEND[%d]: Doing poll on fds:\n",
    		     THR_NO, me->id));
    
          check_threads_etc();
          THREADS_ALLOW();
    
          /* Note: The arguments to MY_POLL may be evaluated multiple times. */
          i = SB_SELECT(sb->active_set, poll_timeout);
    
          IF_PD(fprintf(stderr, " => %d\n", i));
    
          THREADS_DISALLOW();
          check_threads_etc();
          me->may_need_wakeup = 0;
          INVALIDATE_CURRENT_TIME();
        }
    
        if (TYPEOF(me->after_callback) != T_INT)
          call_backend_monitor_cb (me, &me->after_callback);
    
        if (!i) {
          /* Timeout */
        } else if (i>0) {
          int num_active = i;
          struct fd_callback_box fd_list = {
    	me, NULL, &fd_list,
    	-1, 0, 0,
    	0, 0, NULL
          };
          struct fd_callback_box *box;
          ONERROR free_fd_list;
    
          SET_ONERROR(free_fd_list, do_free_fd_list, &fd_list);
    
          done_something = 1;
    
          for(i=0; i <= sb->active_set.max_fd; i++)
          {
    	box = SAFE_GET_ACTIVE_BOX(me, i);
    	if (!box) continue;
    	check_box(box, i);
    
    	box->revents = 0;
    	box->flags = 0;
    
    	if(fd_FD_ISSET(i, sb->active_set.asets + MY_EXCEPTSET)) {
    	  /* Check for errors. GNU libc says this isn't set on error, but
    	   * POSIX does. FIXME: What bits will be set for errors on GNU
    	   * systems, then? Should we always check for that? */
    	  int err = 0;
    	  ACCEPT_SIZE_T len = sizeof (err);
    	  IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: exception on %d\n",
    			THR_NO, me->id, i));
    	  if (!getsockopt (i, SOL_SOCKET, SO_ERROR, (void *)&err, &len) &&
    	      err) {
    	    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: error on %d, error=%d\n",
    			    THR_NO, me->id, i, err));
    	    box->revents |= PIKE_BIT_FD_ERROR;
    	  } else {
    	    box->revents |= PIKE_BIT_FD_READ_OOB;
    	  }
    	}
    	  
    	if(fd_FD_ISSET(i, sb->active_set.asets + MY_READSET)) {
    	  IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: read on %d\n",
    			THR_NO, me->id, i));
    	  box->revents |= PIKE_BIT_FD_READ;
    	}
    	  
    	if(fd_FD_ISSET(i, sb->active_set.asets + MY_WRITESET)) {
    	  IF_PD(fprintf(stderr, "[%d]BACKEND[%d]: write on %d\n",
    			THR_NO, me->id, i));
    	  /* OOB can by BSD definition always be written, so if we can
    	   * write normal data it's reasonable to assume OOB can be
    	   * written too without too much risk of being thrown away. */
    	  box->revents |= PIKE_BIT_FD_WRITE | PIKE_BIT_FD_WRITE_OOB;
    	}
    
    	if (box->revents) {
    	  /* Hook in the box on the fd_list. */
    	  if (!box->next) {
    	    box->next = fd_list.next;
    	    fd_list.next = box;
    	    if (box->ref_obj) add_ref(box->ref_obj);
    	  }
    	}
          }
    
          /* Common code for all variants.
           *
           * Call callbacks for the active events.
           */
          if (backend_call_active_callbacks(&fd_list, me)) {
    	CALL_AND_UNSET_ONERROR(free_fd_list);
    	goto backend_round_done;
          }
    
          CALL_AND_UNSET_ONERROR(free_fd_list);
    
          /* Must be up-to-date for backend_do_call_outs. */
          INVALIDATE_CURRENT_TIME();
        }else{
          switch(errno)
          {
    #ifdef __NT__
          default:
    	Pike_fatal("Error in backend %d\n",errno);
    	break;
    #endif
    	    
          case EINVAL:
    	Pike_fatal("Invalid timeout to select().\n");
    	break;
    	    
    #ifdef WSAEINTR
          case WSAEINTR:
    #endif
          case EINTR:		/* ignore */
    	break;
    	    
    #ifdef WSAEBADF
          case WSAEBADF:
    #endif
    #ifdef ENOTSOCK
          case ENOTSOCK:
    #endif
    #ifdef WSAENOTSOCK
          case WSAENOTSOCK:
    #endif
          case EBADF:
    	/* TODO: Fix poll version! */
    
    	sb_copy_selectors(&sb->active_set, &sb->set);
    
    	timeout->tv_usec=0;
    	timeout->tv_sec=0;
    	if(SB_SELECT(sb->active_set, *timeout) < 0)
    	{
    	  switch(errno)
    	  {
    #ifdef WSAEBADF
    	  case WSAEBADF:
    #endif
    #ifdef ENOTSOCK
    	  case ENOTSOCK:
    #endif
    #ifdef WSAENOTSOCK
    	  case WSAENOTSOCK:
    #endif
    	  case EBADF:
    	    {
    	      FOR_EACH_ACTIVE_FD_BOX (me, box) {
    		fd_FD_ZERO(sb->active_set.asets + MY_READSET);
    		fd_FD_ZERO(sb->active_set.asets + MY_WRITESET);
    		fd_FD_ZERO(sb->active_set.asets + MY_EXCEPTSET);
    		    
    		if(my_FD_ISSET(box->fd, sb->set.sets + MY_READSET))
    		  fd_FD_SET(box->fd, sb->active_set.asets + MY_READSET);
    		if(my_FD_ISSET(box->fd, sb->set.sets + MY_WRITESET))
    		  fd_FD_SET(box->fd, sb->active_set.asets + MY_WRITESET);
    		if(my_FD_ISSET(box->fd, sb->set.sets + MY_EXCEPTSET))
    		  fd_FD_SET(box->fd, sb->active_set.asets + MY_EXCEPTSET);
    		    
    		timeout->tv_usec=0;
    		timeout->tv_sec=0;
    		    
    		if(SB_SELECT(sb->active_set, *timeout) < 0)
    		{
    		  switch(errno)
    		  {
    #ifdef __NT__
    		  default:
    #endif
    		  case EBADF:
    #ifdef WSAEBADF
    		  case WSAEBADF:
    #endif
    #ifdef ENOTSOCK
    		  case ENOTSOCK:
    #endif
    #ifdef WSAENOTSOCK
    		  case WSAENOTSOCK:
    #endif
    
    #ifdef DEBUG_MALLOC
    		    debug_malloc_dump_fd(box->fd);
    #endif
    		    Pike_fatal("Filedescriptor %d (%s) caused fatal error %d in backend.\n",box->fd,fd_info(box->fd),errno);
    			  
    		  case EINTR:
    		    break;
    		  }
    		}
    	      }
    	    }
    	  }
    #ifdef _REENTRANT
    	  /* FIXME: Extra stderr messages should not be allowed.../Hubbe */
    	  write_to_stderr("Bad filedescriptor to select().\n"
    			  "fd closed in another thread?\n", 62);
    #else /* !_REENTRANT */
    	  Pike_fatal("Bad filedescriptor to select().\n");
    #endif /* _REENTRANT */
    	}
    	break;
    	    
          }
        }
    
        {
          int call_outs_called =
    	backend_do_call_outs(me); /* Will update current_time after calls. */
          if (call_outs_called)
    	done_something = 1;
          if (call_outs_called < 0)
    	goto backend_round_done;
        }
    
        call_callback(&me->backend_callbacks, NULL);
    
      backend_round_done:
        if (!done_something)
          timeout->tv_sec = -1;
        else {
          struct timeval now;
          INACCURATE_GETTIMEOFDAY(&now);
          timeout->tv_sec = now.tv_sec;
          timeout->tv_usec = now.tv_usec;
          my_subtract_timeval (timeout, &start_time);
        }
    
        me->exec_thread = 0;
        UNSET_ONERROR (uwp);
      }
    
      /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time)
       *!   Perform one pass through the backend.
       *!
       *!   Calls any outstanding call-outs and non-blocking I/O
   *!   callbacks that are registered in this backend object.
       *!
       *! @param sleep_time
       *!   Wait at most @[sleep_time] seconds. The default when
       *!   unspecified or the integer @expr{0@} is no time limit.
       *!
       *! @returns
       *!   If the backend did call any callbacks or call outs then the
       *!   time spent in the backend is returned as a float. Otherwise
       *!   the integer @expr{0@} is returned.
       *!
       *! @seealso
       *!   @[Pike.DefaultBackend], @[main()]
       */
  /* C implementation of `(): convert the optional sleep_time argument
   * into a struct timeval timeout (tv_sec < 0 means "no time limit"),
   * run one backend pass, and push the elapsed time as a float, or the
   * integer 0 if the pass ran no callbacks or call outs. */
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
  {
    struct timeval timeout;	/* Got bogus gcc warning on timeout.tv_usec. */

    if (sleep_time && TYPEOF(*sleep_time) == PIKE_T_FLOAT) {
      /* Split the float seconds into whole seconds and microseconds. */
      timeout.tv_sec = (long) floor (sleep_time->u.float_number);
      timeout.tv_usec =
	(long) ((sleep_time->u.float_number - timeout.tv_sec) * 1e6);
    }
    else if (sleep_time && TYPEOF(*sleep_time) == T_INT &&
	     sleep_time->u.integer) {
      /* Only the integer 0 is accepted; any other int is a type error. */
      SIMPLE_BAD_ARG_ERROR("`()", 1, "float|int(0..0)");
    }
    else
      timeout.tv_sec = -1;	/* No time limit. */

    /* sb_low_backend_once() overwrites timeout with the time actually
     * spent, or sets tv_sec < 0 if nothing was done. */
    sb_low_backend_once(THIS, &timeout);

    pop_n_elems (args);
    if (timeout.tv_sec < 0)
      push_int (0);
    else
      push_float (DO_NOT_WARN ((FLOAT_TYPE)
			       (DO_NOT_WARN ((double) timeout.tv_sec) +
				DO_NOT_WARN ((double) timeout.tv_usec) / 1e6)));
  }
    
  /* Compile-time hook: record the offset from this program's storage
   * to the inherited storage (inherit 1), so INIT below can locate the
   * enclosing Backend_struct from THIS. */
  EXTRA
  {
    sb_offset = Pike_compiler->new_program->inherits[1].storage_offset -
      Pike_compiler->new_program->inherits[0].storage_offset;
  }
    
  /* Per-object initialization: link the select-based backend storage
   * to the inherited Backend_struct, install the select-specific
   * handlers, and clear all three fd sets. */
  INIT
  {
    struct Backend_struct *me =
      THIS->backend = (struct Backend_struct *)(((char *)THIS) + sb_offset);

    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: init generic\n",
		    THR_NO, me->id));

#ifdef PIKE_DEBUG
    me->debug_handler = (debug_handler_fn *) sb_backend_do_debug;
#endif
    me->update_fd_set_handler = (update_fd_set_handler_fn *) sb_update_fd_set;
    me->handler_data = THIS;

    /* No fds registered yet; start with empty read/write/except sets. */
    THIS->set.max_fd=0;
    my_FD_ZERO(THIS->set.sets + MY_READSET);
    my_FD_ZERO(THIS->set.sets + MY_WRITESET);
    my_FD_ZERO(THIS->set.sets + MY_EXCEPTSET);
    /* FIXME: Should there be something else here? */
    /* me->set.num_fds=0; */
  }
      
  /* Per-object cleanup. gc_trivial: this storage holds nothing the gc
   * needs to follow. The body only emits debug trace output. */
  EXIT
    gc_trivial;
  {
    struct Backend_struct *me = THIS->backend;
    IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: exit generic backend\n",
		    THR_NO, me->id));
  }
    }
    
    /*! @endclass
     */
    #endif
    
    /*! @module DefaultBackend
 *!   This is the @[Backend] object that handles files and call_outs
 *!   by default.
     *!
     *!   This is also the @[Backend] object that will be used if @[main()]
     *!   returns @expr{-1@}.
     *!
     *! @seealso
     *!   @[Backend], @[Stdio.File()->set_nonblocking()], @[call_out()]
     */
    
    /*! @endmodule
     */
    
    /*! @endmodule
     */
    
    /*! @decl mixed call_out(function f, float|int delay, mixed ... args)
     *! @decl void _do_call_outs()
     *! @decl int find_call_out(function f)
     *! @decl int find_call_out(mixed id)
     *! @decl int remove_call_out(function f)
 *! @decl int remove_call_out(mixed id)
     *! @decl array(array) call_out_info()
     *!   These are aliases for the corresponding functions in
     *!   @[Pike.DefaultBackend].
     *!
     *! @seealso
     *!   @[Pike.Backend()->call_out()], @[Pike.Backend()->_do_call_outs()],
     *!   @[Pike.Backend()->find_call_out()], @[Pike.Backend()->remove_call_out()],
     *!   @[Pike.Backend()->call_out_info()]
     */
    
    /* This doesn't need to be here */
    PMOD_EXPORT int write_to_stderr(char *a, size_t len)
    {
    #ifdef __NT__
      size_t e;
      for(e=0;e<len;e++)
        putc(a[e],stderr);
    #else
      int nonblock=0;
      size_t pos;
      int tmp;
      
      if(!len) return 1;
      
      for(pos=0;pos<len;pos+=tmp)
      {
        tmp=write(2,a+pos,len-pos);
        if(tmp<0)
        {
          tmp=0;
          switch(errno)
          {
    #ifdef EWOULDBLOCK
    	case EWOULDBLOCK:
    	  nonblock=1;
    	  set_nonblocking(2,0);
    	  continue;
    #endif
    	  
    	case EINTR:
    	  check_threads_etc();
    	  continue;
          }
          break;
        }
      }
      
      if(nonblock)
        set_nonblocking(2,1);
      
    #endif
      return 1;
    }
    
    PMOD_EXPORT struct object *get_backend_obj_for_fd (int fd)
    {
      struct Backend_struct *b = really_get_backend_for_fd (fd);
      if (!b) return NULL;
      return b->backend_obj;
    }
    
    PMOD_EXPORT void set_backend_for_fd (int fd, struct Backend_struct *new)
    {
      struct Backend_struct *old = get_backend_for_fd (fd);
    
      IF_PD (fprintf (stderr, "Changing backend from %d to %d for fd %d\n",
    		  old ? old->id : -1, new ? new->id : -1, fd));
    
      if (!old)
        low_set_backend_for_fd (fd, new);
      else if (old != new) {
        struct fd_callback_box *box = SAFE_GET_ACTIVE_BOX (old, fd);
        if (box) {
          if (new)
    	change_backend_for_box (box, new);
          else {
    	int is_compat_box = box->callback == compat_box_dispatcher;
    	unhook_fd_callback_box (box);
    	if (is_compat_box)
    	  really_free_compat_cb_box ((struct compat_cb_box *) box);
          }
        }
        low_set_backend_for_fd (fd, new);
      }
    }
    
    /* Compat stuff for old backend interface. */
    
/* Compatibility wrapper around struct fd_callback_box for the old
 * set_*_callback() backend interface. The box member must be first so
 * pointers can be cast between the two struct types (see
 * compat_box_dispatcher). */
struct compat_cb_box
{
  struct fd_callback_box box;	/* Must be first. */
  /* One callback and one opaque data pointer per event type;
   * compat_box_dispatcher() routes events to these. */
  file_callback read, write, read_oob, write_oob, fs_event;
  void *read_data, *write_data, *read_oob_data, *write_oob_data, *fs_event_data;
  int flags; /* fs event flags */
};
    
/* Tell dmalloc how to describe a compat_cb_box when dumping blocks. */
#undef DMALLOC_DESCRIBE_BLOCK
#define DMALLOC_DESCRIBE_BLOCK(X) do {					\
    fprintf (stderr, "  backend: %p, fd: %d, events: 0x%x\n",		\
	     X->box.backend, X->box.fd, X->box.events);			\
  } while (0)

/* Block allocator holding all compat_cb_box instances. */
static struct block_allocator compat_cb_allocator = BA_INIT_PAGES(sizeof(struct compat_cb_box), 1);

/* Allocate an uninitialized compat_cb_box.
 * NOTE(review): the returned memory does not appear to be zeroed here;
 * callers seem expected to initialize every field they use — confirm. */
static struct compat_cb_box * alloc_compat_cb_box() {
    return ba_alloc(&compat_cb_allocator);
}

/* Return a box to the allocator. The box must already be unhooked. */
static void really_free_compat_cb_box(struct compat_cb_box * b) {
    ba_free(&compat_cb_allocator, b);
}

/* Report allocator usage: *n gets the box count, *s the byte size. */
void count_memory_in_compat_cb_boxs(size_t * n, size_t * s) {
    ba_count_all(&compat_cb_allocator, n, s);
}

/* Release all allocator pages at final cleanup. */
void free_all_compat_cb_box_blocks() {
    ba_destroy(&compat_cb_allocator);
}
    
/* Event dispatcher installed in every compat_cb_box: translates a
 * new-style backend event into a call to the corresponding old-style
 * file_callback stored in the box, passing its saved data pointer.
 * Returns whatever the callback returns. */
static int compat_box_dispatcher (struct fd_callback_box *box, int event)
{
  /* Safe cast: box is always the first member of compat_cb_box. */
  struct compat_cb_box *cbox = (struct compat_cb_box *) box;
  switch (event) {
    case PIKE_FD_READ:
      IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: compat_box_dispatcher for "
		      "PIKE_FD_READ to %p %p\n", THR_NO,
		      cbox->box.backend->id, cbox->read, cbox->read_data));
      return cbox->read (cbox->box.fd, cbox->read_data);
    case PIKE_FD_WRITE:
      IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: compat_box_dispatcher for "
		      "PIKE_FD_WRITE to %p %p\n", THR_NO,
		      cbox->box.backend->id, cbox->write, cbox->write_data));
      return cbox->write (cbox->box.fd, cbox->write_data);
    case PIKE_FD_READ_OOB:
      IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: compat_box_dispatcher for "
		      "PIKE_FD_READ_OOB to %p %p\n", THR_NO,
		      cbox->box.backend->id, cbox->read_oob, cbox->read_oob_data));
      return cbox->read_oob (cbox->box.fd, cbox->read_oob_data);
    case PIKE_FD_WRITE_OOB:
      IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: compat_box_dispatcher for "
		      "PIKE_FD_WRITE_OOB to %p %p\n", THR_NO,
		      cbox->box.backend->id, cbox->write_oob, cbox->write_oob_data));
      return cbox->write_oob (cbox->box.fd, cbox->write_oob_data);
    case PIKE_FD_FS_EVENT:
      IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: compat_box_dispatcher for "
		      "PIKE_FD_FS_EVENT to %p %p\n", THR_NO,
		      cbox->box.backend->id, cbox->fs_event, cbox->fs_event_data));
      return cbox->fs_event (cbox->box.fd, cbox->fs_event_data);
    default:
#ifdef PIKE_DEBUG
      Pike_fatal ("Unexpected event type %d.\n", event);
#endif
      return 0;			/* To keep gcc happy. */
  }
}
    
    #define WRAP(CB, EVENT_BIT)						\
      void PIKE_CONCAT3(set_, CB, _callback) (int fd, file_callback cb, void *data)	\
      {									\
        struct Backend_struct *b = really_get_backend_for_fd (fd);		\
        struct fd_callback_box *box = SAFE_GET_ACTIVE_BOX (b, fd);		\
        struct compat_cb_box *cbox;						\
    									\
        IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: set_" #CB "_callback (%d, %p, %p)\n", \
    		    THR_NO, b->id, fd, cb, data));			\
    									\
        if (box) {								\
          check_box (box, fd);						\
          DO_IF_DEBUG (							\
    	if (box->callback != compat_box_dispatcher)			\
    	  Pike_fatal ("Mixing old and new style "			\
    		      "backend interfaces for fd %d.\n", fd);		\
          );								\
          cbox = (struct compat_cb_box *) box;				\
        }									\
        else {								\
          if (!cb) return;							\
          cbox = alloc_compat_cb_box();					\
          INIT_FD_CALLBACK_BOX (&cbox->box, b, NULL,			\
    			    fd, 0, compat_box_dispatcher, 0);		\
        }									\
    									\
        cbox->CB = cb;							\
        cbox->PIKE_CONCAT (CB, _data) = data;				\
    									\
        if (cb)								\
          set_fd_callback_events (&cbox->box, cbox->box.events | EVENT_BIT, cbox->flags); \
        else {								\
          set_fd_callback_events (&cbox->box, cbox->box.events & ~EVENT_BIT, cbox->flags); \
          if (!cbox->box.events) {						\
    	unhook_fd_callback_box (&cbox->box);				\
    	really_free_compat_cb_box (cbox);				\
          }									\
        }									\
      }									\
      									\
      file_callback PIKE_CONCAT3(query_, CB, _callback) (int fd)		\
      {									\
        struct Backend_struct *b=get_backend_for_fd (fd);			\
        struct fd_callback_box *box;					\
        struct compat_cb_box *cbox;						\
    									\
        if (!b) return NULL;						\
        if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;		\
        check_box (box, fd);						\
        DO_IF_DEBUG (							\
          if (box->callback != compat_box_dispatcher)			\
    	Pike_fatal ("Mixing old and new style "				\
    		    "backend interfaces for fd %d.\n", fd);		\
        );									\
    									\
        cbox = (struct compat_cb_box *) box;				\
        if (!(cbox->box.events & EVENT_BIT)) return NULL;			\
        return cbox->CB;							\
      }									\
      									\
      void *PIKE_CONCAT3(query_, CB, _callback_data) (int fd)		\
      {									\
        struct Backend_struct *b=get_backend_for_fd (fd);			\
        struct fd_callback_box *box;					\
        struct compat_cb_box *cbox;						\
    									\
        if (!b) return NULL;						\
        if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;		\
        check_box (box, fd);						\
        DO_IF_DEBUG (							\
          if (box->callback != compat_box_dispatcher)			\
    	Pike_fatal ("Mixing old and new style "				\
    		    "backend interfaces for fd %d.\n", fd);		\
        );									\
    									\
        cbox = (struct compat_cb_box *) box;				\
        if (!(cbox->box.events & EVENT_BIT)) return NULL;			\
        return cbox->PIKE_CONCAT (CB, _data);				\
      }
    
    #define WRAP2(CB, EVENT_BIT)						\
      void PIKE_CONCAT3(set_, CB, _callback) (int fd, file_callback cb, void *data, int flags)	\
      {									\
        struct Backend_struct *b = really_get_backend_for_fd (fd);		\
        struct fd_callback_box *box = SAFE_GET_ACTIVE_BOX (b, fd);		\
        struct compat_cb_box *cbox;						\
    									\
        IF_PD (fprintf (stderr, "[%d]BACKEND[%d]: set_" #CB "_callback (%d, %p, %p)\n", \
    		    THR_NO, b->id, fd, cb, data));			\
    									\
        if (box) {								\
          check_box (box, fd);						\
          DO_IF_DEBUG (							\
    	if (box->callback != compat_box_dispatcher)			\
    	  Pike_fatal ("Mixing old and new style "			\
    		      "backend interfaces for fd %d.\n", fd);		\
          );								\
          cbox = (struct compat_cb_box *) box;				\
        }									\
        else {								\
          if (!cb) return;							\
          cbox = alloc_compat_cb_box();					\
          INIT_FD_CALLBACK_BOX (&cbox->box, b, NULL,			\
    			    fd, 0, compat_box_dispatcher, flags);		\
        }									\
    									\
        cbox->CB = cb;							\
        cbox->PIKE_CONCAT (CB, _data) = data;				\
    									\
        if (cb)								\
          set_fd_callback_events (&cbox->box, cbox->box.events | EVENT_BIT, cbox->flags); \
        else {								\
          set_fd_callback_events (&cbox->box, cbox->box.events & ~EVENT_BIT, cbox->flags); \
          if (!cbox->box.events) {						\
    	unhook_fd_callback_box (&cbox->box);				\
    	really_free_compat_cb_box (cbox);				\
          }									\
        }									\
      }									\
      									\
      file_callback PIKE_CONCAT3(query_, CB, _callback) (int fd)		\
      {									\
        struct Backend_struct *b=get_backend_for_fd (fd);			\
        struct fd_callback_box *box;					\
        struct compat_cb_box *cbox;						\
    									\
        if (!b) return NULL;						\
        if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;		\
        check_box (box, fd);						\
        DO_IF_DEBUG (							\
          if (box->callback != compat_box_dispatcher)			\
    	Pike_fatal ("Mixing old and new style "				\
    		    "backend interfaces for fd %d.\n", fd);		\
        );									\
    									\
        cbox = (struct compat_cb_box *) box;				\
        if (!(cbox->box.events & EVENT_BIT)) return NULL;			\
        return cbox->CB;							\
      }									\
      									\
      void *PIKE_CONCAT3(query_, CB, _callback_data) (int fd)		\
      {									\
        struct Backend_struct *b=get_backend_for_fd (fd);			\
        struct fd_callback_box *box;					\
        struct compat_cb_box *cbox;						\
    									\
        if (!b) return NULL;						\
        if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;		\
        check_box (box, fd);						\
        DO_IF_DEBUG (							\
          if (box->callback != compat_box_dispatcher)			\
    	Pike_fatal ("Mixing old and new style "				\
    		    "backend interfaces for fd %d.\n", fd);		\
        );									\
    									\
        cbox = (struct compat_cb_box *) box;				\
        if (!(cbox->box.events & EVENT_BIT)) return NULL;			\
        return cbox->PIKE_CONCAT (CB, _data);				\
      }
    
/* Instantiate the old-style compat interface for each event type. */
WRAP(read, PIKE_BIT_FD_READ)
WRAP(write, PIKE_BIT_FD_WRITE)
WRAP(read_oob, PIKE_BIT_FD_READ_OOB)
WRAP(write_oob, PIKE_BIT_FD_WRITE_OOB)
WRAP2(fs_event, PIKE_BIT_FD_FS_EVENT)
    
    PMOD_EXPORT struct callback *debug_add_backend_callback(callback_func call,
    							void *arg,
    							callback_func free_func)
    {
      return backend_debug_add_backend_callback(default_backend,
    					    call,
    					    arg,
    					    free_func);
    }
    
    void wake_up_backend(void)
    {
      if(default_backend)
        backend_wake_up_backend(default_backend);
    }
    
    void do_call_outs(void)
    {
      if(default_backend) {
        INVALIDATE_CURRENT_TIME();
        backend_do_call_outs(default_backend);
      }
    }
    
    #ifdef PIKE_DEBUG
/* Cycle counter used to detect and suppress recursive do_debug calls. */
long do_debug_cycle=1;
long current_do_debug_cycle=0;
/* Run the interpreter-wide consistency checks: shared data structures
 * (at d_flag > 2), the registered do_debug callbacks, the default
 * backend's own checks, and a full gc pass (at d_flag > 3).
 * Reentrant calls return immediately. */
void do_debug(void)
{
  extern void check_all_arrays(void);
  extern void check_all_mappings(void);
  extern void check_all_programs(void);
  extern void check_all_objects(void);
  extern void verify_shared_strings_tables(void);
  extern void slow_check_stack(void);

  if(current_do_debug_cycle) return;	/* Already inside a pass. */
  current_do_debug_cycle=++do_debug_cycle;

  if (d_flag > 2) {
    verify_shared_strings_tables();
    slow_check_stack();
    check_all_arrays();
    check_all_mappings();
    check_all_programs();
    check_all_objects();
  }

  call_callback(& do_debug_callbacks, 0);

  if(default_backend)
    backend_do_debug(default_backend);

  if(d_flag>3) do_gc(NULL, 1);

  current_do_debug_cycle=0;
}
    
/* Debug helper: abort with Pike_fatal() if fd is negative or is
 * already registered in the fd -> backend map. */
PMOD_EXPORT void debug_check_fd_not_in_use (int fd)
{
  if (fd < 0) Pike_fatal ("Invalid fd: %d\n", fd);
  if (fd < fd_map_size && fd_map[fd])
    Pike_fatal ("fd %d already in use by backend %d.\n", fd, fd_map[fd]->id);
}
    
    #endif /* PIKE_DEBUG */
    
/* Memory-usage accounting callback registered in init_backend(). */
static struct callback *mem_callback;

/* Create the default backend at interpreter startup and publish it as
 * the __backend constant (and its program as DefaultBackendClass).
 * Prefers a poll-device backend when OPEN_POLL_DEVICE is defined,
 * falling back to poll(2) or select(2) based backends. */
void init_backend(void)
{
  IF_PD(fprintf(stderr, "BACKEND: Init compat callback boxes...\n"));
  IF_PD(fprintf(stderr, "BACKEND: INIT...\n"));
  INIT;
  IF_PD(fprintf(stderr, "BACKEND: Creating default backend...\n"));
  {
    /* Select something suitable. */
#ifdef OPEN_POLL_DEVICE
    /* Note that creation of a poll device backend may fail. */
    JMP_BUF recovery;
    if (SETJMP(recovery)) {
      /* Poll device creation threw; fall back to poll/select. */
#ifdef HAVE_POLL
      default_backend_obj = clone_object(PollBackend_program, 0);
#else
      default_backend_obj = clone_object(SelectBackend_program, 0);
#endif
    } else {
      default_backend_obj = clone_object(PollDeviceBackend_program, 0);
    }
    UNSETJMP(recovery);
#elif defined(HAVE_POLL)
    default_backend_obj = clone_object(PollBackend_program, 0);
#else
    default_backend_obj = clone_object(SelectBackend_program, 0);
#endif
    default_backend = (struct Backend_struct *)
      get_storage(default_backend_obj, Backend_program);

    mem_callback=add_memory_usage_callback(count_memory_in_call_outs,0,0);
    
    add_object_constant("__backend", default_backend_obj, 0);
    add_program_constant("DefaultBackendClass", default_backend_obj->prog, 0);
  }
}
    
    #ifdef DO_PIKE_CLEANUP
/* Release the default backend at interpreter cleanup (only built when
 * DO_PIKE_CLEANUP is defined). */
void exit_backend(void)
{
  /* Note: The mem_callback has already been freed
   *       by exit_builtin_efuns() at this point.
   */
  /* if (mem_callback) remove_callback(mem_callback); */
  free_object(default_backend_obj);
  default_backend = 0;
  EXIT;
}
    
    /* Note: This is called when the last backend object exits, which might be
     * after exit_backend if there's garbage. */
    static void backend_cleanup()
    {
    #ifdef OPEN_POLL_DEVICE
      if (pdb_backends) {
        free(pdb_backends);
        num_pdb_backends = 0;
      }
    #endif /* OPEN_POLL_DEVICE */
      free_all_compat_cb_box_blocks();
      if(fd_map)
      {
        free(fd_map);
        fd_map=0;
        fd_map_size=0;
      }
    #ifdef HAVE_BROKEN_F_SETFD
      cleanup_close_on_exec();
    #endif /* HAVE_BROKEN_F_SETFD */
    }
    #endif