struct kiobuf *iobuf;

/* Pin the user-space mailbox at 'buf' into a kiovec so it cannot be
 * paged out while we copy it to the board under the spinlock. */
ret = alloc_kiovec(1, &iobuf);
   if (ret)
      return ret;
   /* Per Linux convention: PAGE_MASK keeps the page-aligned base,
    * ~PAGE_MASK keeps the offset within the page. */
   bufadr=((unsigned long)buf) & PAGE_MASK;
   bufofs=((unsigned long)buf) & ~PAGE_MASK;

   /* Map from the page base; adding bufofs to the length guarantees the
    * whole mailbox is covered even if it straddles a page boundary. */
   ret = map_user_kiobuf(READ, iobuf, bufadr, 
                         sizeof(struct mailbox) + bufofs);
   if (ret) {
      free_kiovec(1, &iobuf);
      return ret;
   }
   ret = lock_kiovec(1, &iobuf, 1);
   if (ret) {
      unmap_kiobuf(iobuf);
      free_kiovec(1, &iobuf);
      return ret;
   }
   /* kmap before taking the lock: kmap may sleep, spin_lock_irq may not. */
   pageadr[0] = kmap(iobuf->maplist[0]);
   if (iobuf->nr_pages > 1)
      pageadr[1] = kmap(iobuf->maplist[1]);
   spin_lock_irq(&apbs[IndexCard].mutex);

   from = (char *)pageadr[0] + bufofs;
   to = (unsigned long) apbs[IndexCard].VirtIO + RAM_FROM_PC;

   /* Byte-copy the mailbox into the card's shared RAM window. */
   for (i = 0; i < sizeof(struct mailbox); i++) {
      writeb(*(from++), to++);

      /* In-page offset wrapped to zero: we just crossed into the next
       * page, so continue from the second mapped page.
       * BUGFIX: the offset is (addr & ~PAGE_MASK); the old test used
       * PAGE_MASK, which compares the page number and can never be zero
       * for a kmap'ed kernel address, so the crossing was never taken. */
      if (!(((unsigned long)from) & ~PAGE_MASK))
         from = (char *)pageadr[1];
   }

   spin_unlock_irq(&apbs[IndexCard].mutex);

   /* Tear down in reverse order of acquisition. */
   kunmap(iobuf->maplist[0]);
   if (iobuf->nr_pages > 1)
      kunmap(iobuf->maplist[1]);
   unlock_kiovec(1, &iobuf);
   unmap_kiobuf(iobuf);
   free_kiovec(1, &iobuf);
