diff --git a/mkiso.py b/mkiso.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba027b4b02c5f7bd9ab70fb29665f58c7d91c060
--- /dev/null
+++ b/mkiso.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
# Create an ISO file system.
+
+import os
+
+import MySQLdb
+
+
def cmdline(bases, files, media_id, cmd):
    """Build the mkisofs command line for one media.

    bases maps dir_id to the base directory on disk.  files is a
    sequence of (file_id, filename, dir_id, size, md5sum, sha1sum)
    rows.  media_id is used for the volume label and cmd is one extra
    mkisofs argument (e.g. "-print-size" or "-o out.iso").

    Side effects: writes /tmp/mkiso.map (the -path-list graft file)
    and /tmp/md5.txt and /tmp/sha1.txt (checksum manifests that are
    placed on the image).  Returns the command line as one string.
    """
    def quote(s):
        # mkisofs graft points use "=" as the separator and "\" as the
        # escape character, so both must be backslash-escaped (this
        # was the old FIXME).  Escape "\" first so the escapes we add
        # are not re-escaped.
        return str(s).replace("\\", "\\\\").replace("=", "\\=")

    mf = open("/tmp/mkiso.map", "w")
    md5f = open("/tmp/md5.txt", "w")
    sha1f = open("/tmp/sha1.txt", "w")
    try:
        for (file_id, filename, dir_id, size, md5sum, sha1sum) in files:
            # Graft point: <dir_id>/<filename> on the image maps to
            # <base dir>/<filename> on disk.
            mf.write("%s/%s=%s/%s\n" % (
                dir_id, quote(filename), quote(bases[dir_id]),
                quote(filename)))

            # The checksum manifests use the on-image paths, unescaped.
            md5f.write("%s  %s/%s\n" % (md5sum, dir_id, filename))
            sha1f.write("%s  %s/%s\n" % (sha1sum, dir_id, filename))
    finally:
        mf.close()
        md5f.close()
        sha1f.close()

    r = []
    r.append("mkisofs")
    r.append("-hide-joliet-trans-tbl")
    r.append("-hide-rr-moved")
    r.append("-graft-points")
    r.append("-J")
    r.append("-R")
    r.append("-T")
    r.append("-V 'ceder-backup.disk-%d'" % media_id)
    r.append("-path-list /tmp/mkiso.map")
    r.append("-quiet")
    r.append(cmd)
    r.append("md5.txt=/tmp/md5.txt")
    r.append("sha1.txt=/tmp/sha1.txt")
    return ' '.join(r)
+
def iso_size(bases, files, media_id):
    """Return the image size mkisofs reports for these files.

    Runs mkisofs with -print-size and parses the number it prints;
    the callers compare it against the media capacity.
    """
    pipe = os.popen(cmdline(bases, files, media_id, "-print-size"), "r")
    output = pipe.read()
    pipe.close()
    return int(output)
+
def run_mkisofs(bases, files, media_id):
    """Write the ISO image for this media to /usr/tmp/<media_id>.iso."""
    command = cmdline(bases, files, media_id,
                      "-o /usr/tmp/%s.iso" % media_id)
    # Closing the pipe waits for mkisofs to terminate.
    pipe = os.popen(command, "r")
    pipe.close()
+
+def mk_iso(DBH):
+    """Select files and put them on an ISO image.
+    """
+    cursor = DBH.cursor()
+
+    cursor.execute("SELECT batch_id, permanent, capacity, blocksize, speed,"
+                   " label"
+                   " FROM media_batch"
+                   " ORDER BY permanent, speed, label")
+    batches = cursor.fetchall()
+    batch_by_id = {}
+    for [batch_id, permanent, capacity, blocksize, speed, label] in batches:
+        print " %2d: %2dx %s" % (batch_id, speed, label)
+        batch_by_id[batch_id] = (permanent, capacity, blocksize)
+    used_batch = int(raw_input("Select batch: "))
+    (permanent, capacity, blocksize) = batch_by_id[used_batch]
+    if permanent:
+        print "CD-R",
+    else:
+        print "CD-RW",
+    print capacity, "blocks of", blocksize, "bytes"
+
+    bases = {}
+    cursor.execute("SELECT dir_id, dir_name"
+                   " FROM base")
+    for [dir_id, dir_name] in cursor.fetchall():
+        bases[dir_id] = dir_name
+
+    cursor.execute("INSERT INTO media (batch_id, written, broken)"
+                   " VALUES (%s, NOW(), 0)",
+                   used_batch)
+    cursor.execute('SELECT LAST_INSERT_ID()')
+    media_id = cursor.fetchone()[0]
+
+    cursor.execute("SELECT file.file_id, file.filename, file.dir_id,"
+                   " file.size, file.md5sum, file.sha1sum"
+                   " FROM file"
+                   " LEFT JOIN contents ON file.file_id = contents.file"
+                   " WHERE contents.file IS NULL"
+                   " ORDER BY RAND()")
+
+    files = []
+    acc = 0
+    nr_files = 0
+
+    # Fetch more files until we fill the CD (ignoring the filesystem overhead).
+    while acc <= capacity * 2048:
+        rows = cursor.fetchmany()
+        if len(rows) == 0:
+            break
+        files += rows
+        while acc <= capacity * 2048 and nr_files < len(files):
+            acc += files[nr_files][3]
+            nr_files += 1
+
+    # Discard more and more files until we have something that fits (not
+    # ignoring the overhead).
+    backtrack = 4
+    while iso_size(bases, files[:nr_files - backtrack], media_id) > capacity:
+        backtrack *= 2
+        if backtrack > nr_files:
+            backtrack = nr_files
+            break
+
+    # Use an interval search to find the largest possible fit.
+    min_files = nr_files - backtrack
+    max_files = nr_files
+    while min_files < max_files:
+        avg_files = (min_files + max_files + 1) // 2
+        used = iso_size(bases, files[:avg_files], media_id)
+        if used <= capacity:
+            min_files = avg_files
+        else:
+            max_files = avg_files - 1
+
+    # There is now a margin of unused space, smaller than the next
+    # file in files.  It is likely that there exists a smaller file
+    # that we could fit in that space.  We could search for it, but
+    # it is hardly worth the effort.
+    used = iso_size(bases, files[:min_files], media_id)
+    print "Storing %d files. Margin: %d" % (min_files, capacity - used)
+
+    run_mkisofs(bases, files[:min_files], media_id)
+
+    ids = []
+    for f in files[:min_files]:
+        ids.append(f[0])
+    cursor.executemany("INSERT INTO contents (media, file)"
+                       "VALUES (%s, %%s)" % media_id,
+                       ids)
+
def main():
    """Open the isoonline database and create one ISO image."""
    connection = MySQLdb.connect(db='isoonline')
    mk_iso(connection)
+
# Only run when invoked as a script, so the module can also be
# imported without side effects.
if __name__ == '__main__':
    main()