Diffstat (limited to 'hurd/translator')
-rw-r--r--  hurd/translator/auth.mdwn                        13
-rw-r--r--  hurd/translator/cvsfs.mdwn                       52
-rw-r--r--  hurd/translator/devfs.mdwn                       20
-rw-r--r--  hurd/translator/emailfs.mdwn                    287
-rw-r--r--  hurd/translator/examples.mdwn                    93
-rw-r--r--  hurd/translator/exec.mdwn                        12
-rw-r--r--  hurd/translator/ext2fs.mdwn                      37
-rw-r--r--  hurd/translator/ext2fs/large_stores.txt         510
-rw-r--r--  hurd/translator/ext2fs/ogi-fosdem2005.mgp       165
-rw-r--r--  hurd/translator/fatfs.mdwn                       13
-rw-r--r--  hurd/translator/gopherfs.mdwn                    16
-rw-r--r--  hurd/translator/hostmux.mdwn                     31
-rw-r--r--  hurd/translator/magic.mdwn                       21
-rw-r--r--  hurd/translator/mboxfs.mdwn                      11
-rw-r--r--  hurd/translator/netio.mdwn                       17
-rw-r--r--  hurd/translator/nsmux.mdwn                      121
-rw-r--r--  hurd/translator/pfinet.mdwn                      35
-rw-r--r--  hurd/translator/pfinet/implementation.mdwn       13
-rw-r--r--  hurd/translator/pfinet/ipv6.mdwn                 57
-rw-r--r--  hurd/translator/pflocal.mdwn                     13
-rw-r--r--  hurd/translator/procfs.mdwn                      19
-rw-r--r--  hurd/translator/procfs/htop.mdwn                 25
-rw-r--r--  hurd/translator/procfs/killall.mdwn              23
-rw-r--r--  hurd/translator/procfs/procps.mdwn               23
-rw-r--r--  hurd/translator/procfs/top.mdwn                  18
-rw-r--r--  hurd/translator/random.mdwn                      70
-rw-r--r--  hurd/translator/random/mbox.bz2                 bin 0 -> 106158 bytes
-rw-r--r--  hurd/translator/short-circuiting.mdwn            88
-rw-r--r--  hurd/translator/storeio.mdwn                     30
-rw-r--r--  hurd/translator/stowfs.mdwn                      11
-rw-r--r--  hurd/translator/tarfs.mdwn                       25
-rw-r--r--  hurd/translator/tmpfs.mdwn                       29
-rw-r--r--  hurd/translator/tmpfs/notes_bing.mdwn           101
-rw-r--r--  hurd/translator/tmpfs/notes_various.mdwn        218
-rw-r--r--  hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn     73
-rw-r--r--  hurd/translator/unionfs.mdwn                    155
-rw-r--r--  hurd/translator/unionmount.mdwn                  11
-rw-r--r--  hurd/translator/wishlist_1.mdwn                 129
-rw-r--r--  hurd/translator/wishlist_2.mdwn                 191
-rw-r--r--  hurd/translator/writing/example.mdwn            303
-rw-r--r--  hurd/translator/xmlfs.mdwn                       11
41 files changed, 3090 insertions, 0 deletions
diff --git a/hurd/translator/auth.mdwn b/hurd/translator/auth.mdwn
new file mode 100644
index 00000000..d9e70ec2
--- /dev/null
+++ b/hurd/translator/auth.mdwn
@@ -0,0 +1,13 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+[[*The_Authentication_Server*|documentation/auth]], the transcript of a talk
+about the details of the authentication mechanisms in the Hurd by Wolfgang
+Jährling.
diff --git a/hurd/translator/cvsfs.mdwn b/hurd/translator/cvsfs.mdwn
new file mode 100644
index 00000000..f5f1a9e0
--- /dev/null
+++ b/hurd/translator/cvsfs.mdwn
@@ -0,0 +1,52 @@
+[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+## Setting up cvsfs on GNU/Hurd - A step by step process
+
+### Description of cvsfs
+
+cvsfs is a virtual ([[libnetfs]]-based) filesystem allowing you to mount
+remotely located CVS modules into your local filesystem. The version
+controlled files will appear to you just like regular ones. If you just want
+to view one file (or a few) you furthermore save a lot of network
+bandwidth, since only these files will be downloaded; the usual way to do so
+would be to check out the whole tree and delete it after use.
+
+## Step by Step process in installing cvsfs
+
+Download and prepare the source files from the CVS repository and build them:
+
+ $ cvs -z3 -d:pserver:anonymous@cvs.savannah.nongnu.org:/sources/hurdextras co cvsfs
+ $ cd cvsfs/
+ $ autoreconf -i
+ $ ./configure
+ $ make
+ $ make install
+
+Set up the translator and start grazing.
+
+ $ mkdir -p cvsfs_test
+ $ settrans -a cvsfs_test /hurd/cvsfs cvs.sourceforge.net /cvsroot/projectname modulename
+
+An example that mounts the cvsfs module from hurdextras onto a local directory:
+
+ $ mkdir cvs.d
+ $ settrans -ac cvs.d/cvsfs /hurd/cvsfs cvs.savannah.nongnu.org sources/hurdextras cvsfs
+
+Now change to that directory and start using ls, emacs, and whatever you feel
+like. :-)
+
+Happy Hacking.
+
+
+## References
+
+ * <http://www.nongnu.org/hurdextras/>
+ * <http://cvs.sv.nongnu.org/viewcvs/*checkout*/cvsfs/README?root=hurdextras>
diff --git a/hurd/translator/devfs.mdwn b/hurd/translator/devfs.mdwn
new file mode 100644
index 00000000..27df23aa
--- /dev/null
+++ b/hurd/translator/devfs.mdwn
@@ -0,0 +1,20 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+`devfs` is a translator sitting on `/dev` and providing the device nodes
+there in a dynamic fashion -- as opposed to the static passive translator
+settings that are used now.
+
+`devfs` has not yet been written.
+
+---
+
+If applicable, care has to be taken that all code concerning the page-in
+path is resident at all times.
diff --git a/hurd/translator/emailfs.mdwn b/hurd/translator/emailfs.mdwn
new file mode 100644
index 00000000..80e2b150
--- /dev/null
+++ b/hurd/translator/emailfs.mdwn
@@ -0,0 +1,287 @@
+[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+# How cool would it be if email became similar to snail mail?
+
+## Let's see how snail mail works
+
+* You write the letter with a pen and paper
+* You write the "To" address
+* Post it -> Put it in a Post Box
+
+## How Email works
+
+* You have your email client (and here comes the limitation: you can't use
+  your favourite editor)
+* Fill in destination email address
+* Send it (maybe a send button or a keyboard shortcut)
+
+## What are the problems
+
+If you want to use a word processor for sending email, it has to implement a
+feature like mail merge -- and suppose it doesn't have such a feature? You copy
+and paste the text into your email client -- and what if that doesn't support
+HTML? Well, you have workarounds for all of these, but how cool would it be if
+you had something like this:
+
+* You create a file, using any word processor or text editor.
+* Right click and see the properties
+* Set the "To" field
+* Drag it to the Post Box icon on your panel next to Trash
+
+## How can we implement it?
+
+An SMTP translator which uses extended attributes.
+
+## Comments
+
+IRC Logs about the discussion on #hurd
+
+<!-- That smileys are rendered the wrong way below is an ikiwiki bug. -->
+
+ <manuel>j4v4m4n: isn't the HHG a good enough libtrivfs tutorial?
+ <manuel> the problem I have with mail, blog, ..., translators is that you want to save mails before sending them, so you can't edit directly into the translator
+ <j4v4m4n> manuel, may be we want a simpler one, all of us are beginners except AB
+ <manuel> they're not stream-based so unless you save it into memory and wait for "sync" to send mails, it doesn't seem well suited
+ <manuel> who's AB?
+ <j4v4m4n> manuel, create any file with your favourite editor and copy it to the directory where SMTP translator is sitting
+ <j4v4m4n> manuel, Anand Babu
+ <manuel> sure, but how is that better than sending it via the "mail" command then?
+ <manuel> except it's less hype, of course.
+ <j4v4m4n> manuel, http://savannah.gnu.org/users/ab
+ <j4v4m4n> manuel, it would be cool :-)
+ <manuel> still not convinced :)
+ * schlesix (n=thomas@xdsl-81-173-230-219.netcologne.de) has joined #hurd
+ <j4v4m4n> manuel, set up SMTP translator on the desktop and may be add it next to the Trash :-)
+ <j4v4m4n> manuel, have a nice postbox icon
+ <j4v4m4n> manuel, drag your files to it
+ <j4v4m4n> manuel, it would be closer to the real world and snail mail
+ <bvk> j4v4m4n: To whom do they go?
+ <manuel> bvk: the file must be preformatted, probably
+ <j4v4m4n> bvk, in snail mail you will write to address on top of the envelop, right?
+ <manuel> j4v4m4n: yeah well, it could make sense in a desktop envronment
+ <j4v4m4n> bvk, here we can have it as the first line of the file
+ <manuel> not sure
+ <bvk> j4v4m4n: i never used snail :(
+ <j4v4m4n> manuel, that is what I have in mind
+ * j4v4m4n like snail mail that email
+ <manuel> bvk: you never sent a mail via snail mail?! :)
+ * j4v4m4n like snail mail more than email
+ <bvk> manuel: nope :( whats that btw?
+ <j4v4m4n> manuel, or why not have it as the file property itself??
+ <bvk> manuel: you know its first time i hear it *snail mail* :((
+ <j4v4m4n> bvk, the normal mails which a postman delivers :-)
+ <j4v4m4n> manuel, you create a file text, open document or whatver format
+ <manuel> j4v4m4n: I'm quite sure it'd make things more complicated in the end, using file properties, dragging files etc.
+ <j4v4m4n> manuel, righ click and see properties and set the to field
+ <bvk> Oh, these english words... :X
+ <j4v4m4n> manuel, we can use the xtended atributes
+ <j4v4m4n> manuel, which really showcase the power of hurd
+ <j4v4m4n> manuel, it becomes closer to the real world
+ <bvk> actually, is X working on hurd?
+ <j4v4m4n> bvk, well it used to work and the new Xorg release has som
+ <j4v4m4n> bvk, well it used to work and the new Xorg release has some broken packages
+ <j4v4m4n> bvk, if you use an old snapshot repository it will work (xorg 6.9)
+ * marco_g (n=marco@gnu/the-hurd/marco) has joined #hurd
+ <marco_g> hi
+ <j4v4m4n> marco_g, hi
+ * bvk watching MIT lecture video on 'structure and interpretation of computer programs'
+ <manuel> bvk: yeah, X was ported on GNU/Hurd five-six years ago or so
+ <j4v4m4n> manuel, see http://hurd.in/bin/view/Main/EmailFS
+ <j4v4m4n> manuel, add your comments, if you like
+ <manuel> j4v4m4n: how would you convert the .odt to a mail?
+ <j4v4m4n> manuel, attachment
+ <manuel> with an empty mail?
+ <manuel> that won't get through *most* spam filters :)
+ <j4v4m4n> manuel, or may be convert it to HTML
+ <manuel> well converting it to text or HTML would require a set of rules to convert from any format to text/HTML, like a2ps has (some sort of mailcap file)
+ <j4v4m4n> manuel, it can be flexible, a parameter to the translator as to what should be done with each formats
+ <manuel> and there's no convenient way to convert ODT to text AFAIK, you need to use ooffice with a batch program, which you need to provide too
+ <manuel> well that's really complex
+ <j4v4m4n> manuel, well how will you send me a CD by post?
+ <j4v4m4n> manuel, or say a bed?
+ <j4v4m4n> manuel, courier or parcel, right? so attachment is fine
+ <manuel> sure but you'll add a note saying "this is a bed from Praveen"
+ <ness> why not add a note to such a mail
+ <ness> you could even move multiple files simultaneously to the mail translator
+ <manuel> hm
+ <manuel> so how is the translator supposed to know that all the files I move are to be sent in a single mail and not on separate mails?
+ <manuel> and how'll you be able to add a note to such a mail? I mean, of course you can set it on the xattr but that's quite strange (the attachment is supposed to be an attr of the mail, not the other way) and not convenient at all
+ <manuel> I'm quite sure using a MUA is still easier
+ <ness> you could move a complete directory to the mail trans
+ <ness> (and the desktop icon can do this transparently)
+ <manuel> hmm so you have to create a directory, write a text file on it (with a special filename, I guess, since you could also have text files as attachments) and add the attachments to the directory
+ <manuel> and then drag & drop it
+ * manuel thinks things are getting more and more complicated :)
+ <ness> the special file name or attribute thing is right
+ <ness> but you not necisirily need to create a dir
+ <ness> s/necisirily/necessarily/
+ <ness> you just drag 'n' drop multiple files to the icon
+ <manuel> and how is the translator supposed to know they are dragged at the same time and not one after the other?
+ <ness> I do not know if it is viable
+ * antrik (n=olaf@port-212-202-210-130.dynamic.qsc.de) has joined #hurd
+ <manuel> AFACS, dragging multiple files just make the desktop issue multiple rename()
+ <moritz> manuel: however the desktop handles that - it would be a rather easy thing to fix, i guess.
+ * schlesix has quit (Remote closed the connection)
+ <manuel> moritz: how is the desktop supposed to handle that?
+ <moritz> if this mail translator approach, is primarily to be used in desktop environments, one could implement the whole thing on the desktop environment layer, not with Hurd translators.
+ <moritz> manuel: i think it would be rather easy for the desktop to distinguish between actions like "ONE file is dragged" and "MULTIPLE files are dragged".
+ * schlesix (n=schlesix@xdsl-81-173-230-219.netcologne.de) has joined #hurd
+ <manuel> oh yeah, but then you loose the transparency, and there's no point in making it a translator. I think we agree on that :)
+ <moritz> i see rather little point in making it a translator anyway, since only god knows wether we have similar concept to translators in hurd-ng.
+ <manuel> yeah sure, but praveen wasn't planning it for HurdNG AIUI
+ <moritz> in that case it would probably be toy project. fine.
+ <moritz> i need to do some maths. see you.
+ <manuel> hmm well, you can't write anything else than toy projects, then
+ <ness> moritz: you shouldn't be too sure about success of ngHurg
+ <ness> it is an experiment
+ <antrik> sdschulze: ping
+ * antrik has quit (Remote closed the connection)
+ * antrik (n=olaf@port-212-202-210-130.dynamic.qsc.de) has joined #hurd
+ * bddebian (n=bdefrees@71.224.172.103) has joined #hurd
+ <j4v4m4n> manuel, This is a lot of input, let me sink these all first :-)
+ <bddebian> Heya folks
+ <schlesix> heya bddebian!
+ <j4v4m4n> it is ofcoures a "nice to have" feature.
+ <j4v4m4n> These are quite intersting inputs as well
+ <bddebian> Hi schlesix
+ <j4v4m4n> manual in the real wprld how will you send multiple things, say you want to send a CD and a bed
+ <j4v4m4n> manuel, you will package it (files) and then one parcel containing all these things (folder)
+ <manuel> j4v4m4n: well you want to make sending emails easier than sending real mails :-)
+ <j4v4m4n> manuel, it won't substitute MUAs
+ <j4v4m4n> manuel, we need it as the backend
+ <diocles> geekoe: You asked about GFS yesterday; well, glibc compiled. :) I've not done much more after that.
+ <antrik> regarding mail translator: take a look at Plan9, they have been doing it for years
+ <j4v4m4n> manuel, sorry not MUA I meant MTA
+ * syamajala (n=syamajal@c-24-147-61-120.hsd1.ma.comcast.net) has joined #hurd
+ <manuel> ah yes sure, but MUA will still be easier to use afaics
+ <j4v4m4n> manuel, people who are used to Windows say GNU/Linux is tough to use
+ <j4v4m4n> manuel, but when they start with GNOME or KDE they don't have any issues
+ <j4v4m4n> antrik, that is a great info I will look into it
+ <ness> j4v4m4n: they do it quite differently
+ <manuel> this doesn't answer to the basic question: how is it better than what we have now
+ <j4v4m4n> manuel, it is different, better is always debatable
+ <j4v4m4n> manuel, GNOME might work for but some doesn't use X at all
+ <j4v4m4n> manuel, whether it is good will be depending on the implemetation
+ <Jeroen> people who used to GNU/Linux say Windows is tough to use
+ <Jeroen> +are
+ <unlink> GNU/Linux is at least tougher to say
+ <Jeroen> no, people have less experience with GNU/Linux
+ <manuel> "to say", Jeroen
+ <j4v4m4n> manuel, better and easier are always relative
+ <j4v4m4n> manuel, there a lot of people still using mutt when you have thunderbird
+ <manuel> well because they have reasons to say mutt is easier than thunderbird
+ <Jeroen> the only thing is that you've to learn a few shortcuts when you want to use mutt, you can't just click around
+ <j4v4m4n> manuel, exactly
+ <j4v4m4n> manuel, consider this, you want to send a document across to someone
+ * Blackmore has quit (Read error: 104 (Connection reset by peer))
+ * koollman has quit (Remote closed the connection)
+ <j4v4m4n> manuel, now you open a MUA add the attachment send it
+ * koollman (n=samson_t@gsv95-1-82-233-13-27.fbx.proxad.net) has joined #hurd
+ <j4v4m4n> manuel, if you just have to drag it to an icon, would that be easier?
+ * Casanova (n=prash@unaffiliated/casanova) has joined #hurd
+ <j4v4m4n> manuel, chmod +to:manuel@somehost doc.pdf ; cp doc.pdf postbox/
+ <Jeroen> yeah
+ <Jeroen> chmod is for setting permissions...
+ <j4v4m4n> manuel, I am not sure how to set xattr
+ <manuel> well, setfattr
+ <Jeroen> well
+ <j4v4m4n> manuel, ok
+ <Jeroen> how do you type your subject?
+ <Jeroen> and there message itself?
+ <Jeroen> s/there/the/
+ <Jeroen> how do you encrypt+sign it with pgp?
+ <manuel> j4v4m4n: well the problem is still the same you know. OK for to/subject : they'd be extended attributes. but how do you type the message itself?
+ <antrik> I don't think using xattr for such stuff is a good idea
+ <antrik> after all, it's not a property of the document
+ <j4v4m4n> antrik, we can use it only on a particular directory on which our translator sit
+ <j4v4m4n> manuel, create a folder
+ <manuel> that'd mean mkdir message; ln -s doc.pdf message/; cat >message/message <<EOF; setfattr -n to -v mmenal@hurdfr.org; setfattr -n subject -v document; mv message postbox
+ <antrik> the reason why having translators for such stuff is that this way you have a generic service for sending mail, whether you use it through a special UI (MUA), directly with file commands, from a script, or from some other program that just sends mails as a side functionality
+ * mheath has quit (Connection reset by peer)
+ <j4v4m4n> manuel, that looks scary :-(
+ <manuel> not sure it's easier than "mutt; m; mmenal@hurdfr.org; document; >typing the message<; a; doc.pdf; y"
+ <antrik> manuel: it is easier in some situations
+ <antrik> (and again, I would not use xattr for such stuff)
+ <j4v4m4n> manuel, now how do you use mutt on GNOME?
+ <antrik> in fact, Plan9 explicitely does *not* have any xattr and stuff
+ <manuel> antrik: well xattr on the directory that represents the message is not illogical
+ * mheath (n=mheath@c-67-182-231-23.hsd1.co.comcast.net) has joined #hurd
+ <j4v4m4n> antrik, may be we can think of some other way if you don't wanna xattr
+ <manuel> j4v4m4n: well I just used the CLI because it's easier to describe, but try to explain the steps in a GUI and you'll see it's the same problem
+ <j4v4m4n> manuel, right click on desktop -> create folder -> drag the files to the folder -> set attributes to the folder-> drag it to postbox
+ <j4v4m4n> manuel, it is quite logical step
+ <manuel> sure, but how is it easier than click on the MUA icon -> create mail -> drag the files to the mail window ; type the attrbutes + contents ; click on send mail
+ <manuel> looks quite similar to me :-)
+ <j4v4m4n> manuel, or if you already have the folder just drag it
+ <kilobug> a POP or IMAP translator would be more useful IMHO (but well, I didn't read all the backlog, so I may be off topic)
+ <j4v4m4n> manuel, you don't have a MUA here :-) just files and folders
+ <kilobug> to read mails, I mean
+ <j4v4m4n> kilobug, that is even easier IMAP->mabox and then mboxfs
+ <manuel> j4v4m4n: well you have a MUA : that's the translator
+ <j4v4m4n> kilobug, mboxfs is already available
+ <j4v4m4n> kilobug, I think someone already wrote IMAP to mbox as well but couldn't find it
+ <kilobug> j4v4m4n: well, imapfs could work both way, writing changes on the imap server too ;)
+ <antrik> manuel: the difference is not how it is used; the difference is how it is implemented
+ <antrik> manuel: if you have a generic mail translator, you have most functionality already there with the file manager; all you need to add is some scripts for better comfort
+ <antrik> j4v4m4n: the way I would do it (and I guess Plan9 does, though I haven't checked) is either having a file in the mail directory with the headers, or a subdirectory with a single file for each header (probably the latter)
+ <j4v4m4n> antrik, that would make it too complicated IMHO, it would be close to how it is for snail mail
+ <antrik> j4v4m4n: I don't see how this would be more complicated than xattr
+ <j4v4m4n> manuel, you can write your own scripts to automate it for whatver way you want
+ * azor (n=azor@62-43-135-201.user.ono.com) has joined #hurd
+ <manuel> antrik: having the functionality in the filesystem is useful because programs can use this functionality without patching; the protocol to use the mail translator is so specific that you either need to be a real user (but then a MUA is a lot more useful) or have a patched program (but then you could use a lib)
+ <j4v4m4n> antrik, right clicking a file and setting u p to and subject seems easier that creating more files
+ <antrik> j4v4m4n: I don't think so. maybe it is in gnome, but than I'd consider it a shortcoming of gnome
+ <antrik> j4v4m4n: in shell "cat foo@example.invalid headers/to" is about as simple as you can get
+ <manuel> >
+ <antrik> erm... I mean echo
+ <antrik> and >, yes
+ <antrik> sorry
+ * yoj (n=jao@200.163.8.72) has joined #hurd
+ <manuel> "echo foo@example.invalid > headers/to" is not easier than "setfattr -n to -v foo@example.invalid" AFAICS.
+ <antrik> echo foo@example.invalid >headers/to
+ * yoj (n=jao@200.163.8.72) has left #hurd
+ * yoj (n=jao@200.163.8.72) has joined #hurd
+ <kilobug> manuel: it is a tiny bit if your "foo@example.invalid" is the output of a command, mycomplexcommand > headers/to is a bit easier than setfattr -n to -v `mycomplexcommand`
+ <kilobug> manuel: but it's the same for a value you type directly
+ <antrik> manuel: objectively it is not simpler, but it uses a generic mechanism users now well, instead of obscure xattr stuff
+ <antrik> know well
+ <j4v4m4n> antrik, ok we can think of that, but how about a desktop user?
+ abeaumont andar antrik arnau azeem azor
+ <j4v4m4n> antrik, he has to use more clicks and more head aches
+ <j4v4m4n> antrik, just right click and add to address and subject just you write on the envelop
+ <kilobug> j4v4m4n: that's good ! it makes him buy more medicine, drug corporations will sponsor you then !
+ * kilobug runs away
+ * j4v4m4n chases kilobug
+ <j4v4m4n> kilobug, better way would be making outlook run on GNU :-)
+ <marco_g> Or GNU on outlook \o/
+ * yoj (n=jao@200.163.8.72) has left #hurd
+ <kilobug> this channel is becoming insnae :p
+ <j4v4m4n> kilobug, or is it the members ?? :-)
+ <marco_g> I agree with kilobug, we should stop those weirdos here :-/
+ * whr` (i=whr@acy238.neoplus.adsl.tpnet.pl) has joined #hurd
+ <antrik> hm... anyone have the marcus quote at hand?
+ <j4v4m4n> i got to go as well
+ <j4v4m4n> bye
+ <kilobug> bye j4v4m4n
+
+## Interested?
+
+Join the project -- Add yourself to the list below
+
+* [[Praveen A]]
diff --git a/hurd/translator/examples.mdwn b/hurd/translator/examples.mdwn
new file mode 100644
index 00000000..ee766fbf
--- /dev/null
+++ b/hurd/translator/examples.mdwn
@@ -0,0 +1,93 @@
+[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+## Setting up translators - HowTo
+
+Translators can be obtained from hurd-extras <http://www.nongnu.org/hurdextras/>
+
+ cvs -z3 -d:pserver:anonymous@cvs.savannah.nongnu.org:/sources/hurdextras co <modulename>
+
+* httpfs translator
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ settrans -a tmp/ /hurd/httpfs www.hurd-project.com/
+
+or
+
+ $ settrans -a tmp/ /hurd/httpfs www.hurd-project.com/ --proxy=<proxy> --port=<port>
+ $ cd tmp/
+ $ ls -l
+
+* ftpfs translator
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ settrans -cgap ftp /hurd/hostmux /hurd/ftpfs /
+ $ cd ftp
+ ftp$ ls
+ ftp$ cd ftp.fr.debian.org
+ ftp/ftp.fr.debian.org $ ls
+
+* tarfs translator (needs uatime fix, 2010-08-25 → [git repo](http://github.com/giselher/tarfs))
+
+You can use tarfs to mount (almost) any tar file (currently broken, 2010-08-25):
+
+ $ settrans -ca a /hurd/tarfs -z myfile.tar.gz
+ $ settrans -ca b /hurd/tarfs -y myfile.tar.bz2
+ $ settrans -ca c /hurd/tarfs myfile.tar
+
+You can even use it to create new tar files:
+
+ $ settrans -ca new /hurd/tarfs -cz newfile.tar.gz
+ $ cp -r all my files new/
+ $ syncfs new
+
+This is not as fast as `tar czvf newfile.tar.gz all my files`, but at least it's more original. ;)
+
+* cvsfs translator
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ settrans -ac cvsfs_testing /hurd/cvsfs cvs.savannah.nongnu.org /sources/hurdextras
+ $ cd cvsfs_testing
+
+* pfinet translator -- configuring your network interface
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ settrans -fgca /servers/socket/2 /hurd/pfinet -i <interface> -a <ip address> -m <subnet mask> -g <gateway ip>
+
+* Console translator -- setting up virtual consoles
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ console -d vga -d pc_mouse -d pc_kbd -d generic_speaker /dev/vcs
+
+* iso9660fs translator -- 'mounting' your cdrom
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ settrans -ac /cdrom /hurd/iso9660fs /dev/<cdrom device file>
+
+* ext2fs translator -- 'mounting' an ext2fs partition
+
+<!-- Prevent ikiwiki / Markdown rendering bug. -->
+
+ $ settrans -ac /linux /hurd/ext2fs /dev/<partition device file>
+
+* unionfs translator
+
+To join "foo/" "bar/" and "baz/" in the directory "quux/", just do:
+
+ $ settrans -capfg quux/ /hurd/unionfs foo/ bar/ baz/
+
+If you want the contents of quux/ itself to be joined into the union as well, add -u as a translator argument.
+You can add filesystems at run time with the fsysopts command.
diff --git a/hurd/translator/exec.mdwn b/hurd/translator/exec.mdwn
new file mode 100644
index 00000000..d5b6bfbc
--- /dev/null
+++ b/hurd/translator/exec.mdwn
@@ -0,0 +1,12 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+The *exec* server, listening on `/servers/exec`, is responsible for
+preparing the execution of processes.
diff --git a/hurd/translator/ext2fs.mdwn b/hurd/translator/ext2fs.mdwn
new file mode 100644
index 00000000..305576b8
--- /dev/null
+++ b/hurd/translator/ext2fs.mdwn
@@ -0,0 +1,37 @@
+[[!meta copyright="Copyright © 2007, 2008, 2010 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+# Large Stores
+
+The `ext2fs` translator from the upstream Hurd code base can only handle file
+systems with sizes of less than roughly 2 GiB.
+
+[[!tag open_issue_hurd]]
+
+## Ognyan's Work
+
+ * Ognyan Kulev, [[*Supporting Large ext2 File Systems in the
+ Hurd*|ogi-fosdem2005.mgp]], 2005, at FOSDEM
+
+ * Ognyan Kulev, [[large_stores]]
+
+ * <http://kerneltrap.org/node/4429>
+
+Ognyan's patch lifts this limitation (and is being used in the
+[[Debian_GNU/Hurd_distribution|running/debian]]), but it introduces another
+incompatibility: `ext2fs` then only supports block sizes of 4096 bytes.
+Smaller block sizes are commonly automatically selected by `mke2fs` when using
+small backend stores, like floppy devices.
+
+
+# Documentation
+
+<http://www.nongnu.org/ext2-doc/>
diff --git a/hurd/translator/ext2fs/large_stores.txt b/hurd/translator/ext2fs/large_stores.txt
new file mode 100644
index 00000000..e17a02a5
--- /dev/null
+++ b/hurd/translator/ext2fs/large_stores.txt
@@ -0,0 +1,510 @@
+This is -*- mode: outline -*-
+
+* Introduction
+
+This is an attempt to describe the ext2fs patch for the Hurd. The patch
+allows using partitions/stores larger than approximately 1.5G by not
+memory-mapping the whole store into the address space.
+
+As a guideline, the changelog of RC1 (Release Candidate 1) is
+followed, so I hope nothing is missed. While writing this text,
+some questions arose; they are marked with XXX. An effort will be
+made to fix all of these for RC2.
+
+ Ognyan Kulev <ogi@fmi.uni-sofia.bg>
+
+* The block layer and its purpose
+
+The basic unit of the ext2 filesystem is the "block". All filesystem
+operations work on blocks, which are read, and sometimes modified and
+written back. Possible block sizes are 1K, 2K and 4K, but the current
+implementation works reliably only with 4K blocks (= the page size of i386).
+
+So the two basic operations on blocks are "reading" a block and
+"writing" a block.
+
+* Current implementation
+
+** Reading
+
+Currently, the whole store is memory-mapped into the address space of
+the ext2fs process. This is called the "disk image", although "store
+image" would be more accurate. The address of the start of the disk
+image is stored in pager.c:disk_image. So "reading" a block is easy:
+just calculate the byte offset of the block and add it to disk_image.
+The resulting address points to the start of the desired block.
+
+The macro ext2fs.h:bptr has exactly this purpose: given a block number,
+it returns a pointer to the block. Sometimes we have a pointer somewhere
+inside a block and we want the block number; this is calculated by
+ext2fs.h:bptr_block.
+
+There is another set of macros that use byte offsets instead of block
+numbers. These are boffs_ptr (store offset -> memory pointer) and
+bptr_offs (memory pointer -> store offset).
+
+Converting between store offset and block number is easy with macros
+boffs (block -> offset) and boffs_block (offset -> block). Other
+useful macros are trunc_block and round_block.
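+
+As a rough sketch (not the literal ext2fs.h code), and assuming the
+disk_image and block_size globals described above, these helpers amount
+to simple pointer arithmetic:
+
+  #include <sys/types.h>            /* off_t */
+
+  extern char *disk_image;          /* start of the mapped store */
+  extern unsigned long block_size;  /* filesystem block size */
+
+  static inline char *bptr (unsigned long block)
+    { return disk_image + (off_t) block * block_size; }
+  static inline unsigned long bptr_block (char *ptr)
+    { return (ptr - disk_image) / block_size; }
+  static inline off_t boffs (unsigned long block)
+    { return (off_t) block * block_size; }
+  static inline unsigned long boffs_block (off_t offs)
+    { return offs / block_size; }
+  static inline char *boffs_ptr (off_t offs)
+    { return disk_image + offs; }
+  static inline off_t bptr_offs (char *ptr)
+    { return ptr - disk_image; }
+  static inline off_t trunc_block (off_t offs)
+    { return offs - offs % block_size; }
+  static inline off_t round_block (off_t offs)
+    { return trunc_block (offs + block_size - 1); }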
+
+** Writing
+
+Modifying a block and saving it is not as straightforward as reading.
+For writing, you need to use a "pokel" ("poked elements"). The pokel
+interface is in ext2fs.h; the implementation is in pokel.c.
+
+The problem is that generally multiple blocks are modified and we want
+all these changes to hit the disk at roughly the same time. So we can't
+just change a block and leave the decision of when it gets written to
+the microkernel.
+So there is a pokel for each set of changes and each change should be
+reported to the pokel by calling pokel_add. When this set of changes
+is completed, pokel_sync of pokel_flush is called. (The latter is
+used to ignore changes.)
+
+In practice, there is one indir_pokel for each ext2fs.h:disknode,
+which is used for indirect blocks of ext2fs. The only other pokel
+used is ext2fs.h:global_pokel, where all other changes to metadata are
+registered.
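+
+For orientation, the pokel interface described above boils down to
+roughly the following declarations (a sketch reconstructed from this
+text; the exact signatures in ext2fs.h may differ):
+
+  #include <mach.h>                 /* vm_size_t */
+
+  struct pokel;                     /* a list of "poked" regions */
+
+  /* Record that LENGTH bytes starting at LOC have been modified.  */
+  void pokel_add (struct pokel *pokel, void *loc, vm_size_t length);
+  /* Write all recorded changes back (WAIT: synchronously).  */
+  void pokel_sync (struct pokel *pokel, int wait);
+  /* Forget the recorded changes without writing them.  */
+  void pokel_flush (struct pokel *pokel);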
+
+* Proposed implementation
+
+First, one must realize that the idea of mapping the whole store is to be
+thrown away. So only parts of the store should be mapped. The currently
+mapped parts of the store are collectively called the "cache".
+
+In the proposed implementation, the cache has a fixed size of
+ext2fs.h:DISK_CACHE_BLOCKS blocks. In RC1 it's 100, but this is only to
+catch bugs easily. In practice it can be, for example, 512M, i.e.
+(512*1024/4) blocks of 4K. pager.c:disk_cache_size and
+pager.c:disk_cache_blocks are additional variables holding that
+information.
+
+The cached blocks are mapped at ext2fs.h:disk_cache and span
+disk_cache_size bytes (= disk_cache_blocks blocks). As in the
+original implementation, this part of the address space is handled by
+a custom pager.
+
+** Data structures
+
+Blocks in the cache aren't consecutive, so we need a data structure that
+records which part of the address space represents which block. This is
+the purpose of pager.c:disk_cache_info. The index into this array is the
+"cached block index". But this array doesn't help in finding whether a
+specific block is mapped, and where. That is the purpose of the
+pager.c:disk_cache_bptr ihash, which finds the cached block index for a
+given block number. Both data structures are guarded by
+pager.c:disk_cache_lock.
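+
+Put together, the bookkeeping described here can be pictured roughly as
+follows (a sketch, not the actual pager.c declarations; the DC_* flags
+and the lock/ihash types are stand-ins for the real ones, and the flags
+are discussed further below):
+
+  /* One entry per slot of the disk cache.  */
+  struct disk_cache_info
+  {
+    unsigned long block;  /* which filesystem block occupies this slot */
+    int flags;            /* DC_INCORE, DC_REMAPPING, DC_FIXED, ...    */
+    int ref_count;        /* callers currently holding a pointer to it */
+  };
+
+  extern struct disk_cache_info *disk_cache_info; /* indexed by slot      */
+  extern struct hurd_ihash disk_cache_bptr;       /* block number -> slot */
+  extern struct mutex disk_cache_lock;            /* guards both of them  */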
+
+** Public interface
+
+"Public" interface to the cache are functions disk_cache_block_ref,
+disk_cache_block_ref_ptr, disk_cache_block_deref,
+disk_cache_block_is_ref. disk_cache_block_ref takes block number and
+return pointer to block content. Reference count of this cached block
+is incremented. After finishing work with block,
+disk_cache_block_deref should be called.
+
+In converting original ext2fs code to use this functions, usually call
+to bptr is turned into call to disk_cache_block_ref. In addition,
+after pointer to block content is not used anymore,
+disk_cache_block_deref is called. This simple scheme is only for
+reading from block. For modifying block, see about pokels below.
+
+disk_cache_block_ref_ptr just increments the reference count of the
+specified block. It's used when we give a pointer to block contents to
+somebody else that will dereference it (e.g. a pokel) and we want to
+continue using the contents ourselves.
+
+disk_cache_block_is_ref checks whether the specified block has a
+reference count greater than zero. It's used in asserts.
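+
+In summary, the public interface looks roughly like this (prototypes
+reconstructed from the description above, so treat them as a sketch):
+
+  /* Map BLOCK into the cache if necessary, increment its reference
+     count and return a pointer to its contents.  */
+  void *disk_cache_block_ref (unsigned long block);
+  /* Take one more reference on the already mapped block that PTR
+     points into (used before handing PTR to somebody else, e.g. a
+     pokel, that will dereference it).  */
+  void disk_cache_block_ref_ptr (void *ptr);
+  /* Drop one reference on the block that PTR points into.  */
+  void disk_cache_block_deref (void *ptr);
+  /* Return nonzero if BLOCK currently has a reference count greater
+     than zero (used in asserts).  */
+  int disk_cache_block_is_ref (unsigned long block);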
+
+*** bptr* and boffs* macros
+
+These macros continue to work as before, but they don't deal with
+reference counting, and this should be taken into consideration. In
+addition, bptr_index returns the cached block index for a given pointer
+to block contents. (This function is used internally.)
+
+*** Pokels
+
+When pokel_add is called with a pointer to block contents, this
+"consumes" a reference to the block. It's not consumed (decremented by 1)
+immediately, but when pokel_sync or pokel_flush is called. (The reference
+is consumed immediately if the block is already in the pokel. The
+important thing is that you always lose one reference to the block.)
+
+So we have the following code when we read from block:
+
+ char *bh = disk_cache_block_ref (block);
+ ...
+ disk_cache_block_deref (bh);
+
+And the following code when we modify block:
+
+ char *bh = disk_cache_block_ref (block);
+ ...
+ pokel_add (pokel, bh, block_size);
+
+**** Indirect calls to pokel_add
+
+Some functions indirectly call pokel_add, so this should be taken into
+consideration. These are:
+
+ * record_global_poke
+ * record_indir_poke
+
+So these functions should be treated in the same scheme as pokel_add.
+For example:
+
+ char *bh = disk_cache_block_ref (block);
+ ...
+ record_indir_poke (node, bh);
+
+**** Modifying SBLOCK in diskfs_set_hypermetadata
+
+SBLOCK is a global variable that points to the superblock contents.
+There is one reference held for the superblock, so before we call
+record_global_poke (which consumes a reference),
+disk_cache_block_ref_ptr is called.
+
+**** Modifying GDP
+
+When a group descriptor is wanted, usually group_desc is called and the
+result is stored in a local variable GDP. After modifying GDP,
+record_global_poke is called. But because record_global_poke is used
+(and consumes a reference), we need a call to disk_cache_block_ref_ptr:
+
+ gdp = group_desc (i);
+ ...
+ disk_cache_block_ref_ptr (gdp);
+ record_global_poke (gdp);
+
+*** More complex use of pointer to block content
+
+In the ext2_new_block and ext2_alloc_inode functions, we have a local
+pointer variable BH that sometimes points to block contents and
+sometimes points to nothing. In order to reduce possible errors, when
+BH points to nothing it is always 0. At some points (goto labels),
+there is an assertion that BH is what's expected (a pointer to nothing
+or a pointer to something).
+
+*** dino
+
+The dino function returns a pointer to the struct ext2_inode for a given
+ino_t. This takes a reference, so a corresponding disk_cache_block_deref
+should be called after finishing work with the ext2_inode. For
+convenience, dino is renamed to dino_ref, and dino_deref just calls
+disk_cache_block_deref.
+
+ struct ext2_inode *di = dino_ref (np->cache_id);
+ ...
+ dino_deref (di);
+
+Or
+
+ struct ext2_inode *di = dino_ref (np->cache_id);
+ ...
+ sync_global_ptr (di, 1);
+ dino_deref (di);
+
+Or
+
+ struct ext2_inode *di = dino_ref (np->cache_id);
+ ...
+ record_global_poke (di);
+
+* Internals of the proposed implementation
+
+As said earlier, instead of mapping the whole store of the filesystem
+into the address space, only part of it is mapped. This part is called
+the "cache" or "disk cache" (although "store cache" would be more
+appropriate). Currently, the cache is a contiguous area in the address
+space that starts at disk_cache. Its size is disk_cache_size, which is
+disk_cache_blocks blocks of size block_size.
+
+The blocks mapped in the disk cache are not fixed -- each block in the
+cache can be replaced at any time with another block. So we need to know
+which blocks are currently cached, and where. Information about each
+cached block is stored in disk_cache_info[], indexed from 0 to
+disk_cache_blocks-1. The block number is stored there (among some other
+things, discussed later). The reverse direction, getting the index of a
+cached block from a block number, is achieved by using the
+disk_cache_bptr ihash. Both these data structures are guarded by
+disk_cache_lock.
+
+** Requesting a block
+
+When the ext2 code requests a block, it calls disk_cache_block_ref.
+First, this block is looked up in disk_cache_bptr. If it's there, the
+reference count is incremented and a pointer to the block contents is
+returned. In this case, there is a call to disk_cache_wait_remapping,
+which is explained a bit later.
+
+It's more interesting when the block is not found in disk_cache_bptr. In
+this case, disk_cache_map is called. There, disk_cache_bptr is consulted
+again, because in the meantime another thread could already have mapped
+this block. If that is the case, the code is essentially the same as
+in disk_cache_block_ref.
+
+Once it's certain that the block is not in the cache, we have no choice
+but to throw away an already mapped/cached block and put our block in
+its place. Such a block has to meet the following conditions:
+
+- Its reference count being 0
+- Not in the core
+- Not being remapped (explained later)
+- Not being forbidden to be remapped ("fixed", explained later)
+
+The last three conditions are actually flags in disk_cache_info:
+DC_INCORE, DC_REMAPPING and DC_FIXED. DC_DONT_REUSE collectively
+gives the condition in which block is not suitable for
+reusing/remapping.
+
+The search for a suitable place in the cache is linear. As an
+optimisation, it doesn't start from the beginning, but from where it
+ended last time. This last index is stored in disk_cache_hint, so new
+candidate blocks for replacement are searched for in a circular fashion.
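+
+In pseudo-C, the circular search works along these lines (a sketch that
+assumes the disk_cache_info, disk_cache_hint, disk_cache_blocks and
+DC_DONT_REUSE names described in this text; compare the pseudocode in
+the accompanying FOSDEM slides):
+
+  /* Find a reusable slot, starting at the hint.  Returns -1 if every
+     slot is referenced, in core, being remapped or fixed, in which
+     case the caller has to force pages out and retry.  */
+  static int
+  find_reusable_slot (void)
+  {
+    int i = disk_cache_hint;
+    do
+      {
+        if (disk_cache_info[i].ref_count == 0
+            && ! (disk_cache_info[i].flags & DC_DONT_REUSE))
+          {
+            disk_cache_hint = (i + 1) % disk_cache_blocks;
+            return i;
+          }
+        i = (i + 1) % disk_cache_blocks;
+      }
+    while (i != disk_cache_hint);
+    return -1;
+  }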
+
+If a suitable place is found, the old mapping is removed and the new
+mapping is initialized. But we are still not ready to return a pointer
+to the block contents, because the contents are not available yet. We
+mark the block as DC_REMAPPING, which makes disk_cache_block_ref for
+that block in other threads wait until the page is completely remapped.
+
+In both cases -- when we have found a place and when no suitable place
+is found -- disk_cache_hint is updated so that the next disk_cache_map
+continues searching from where we ended.
+
+When no suitable place is found, we have to use force. First, all
+pages in the disk cache are touched. This is a workaround for a bug in
+GNU Mach. The patch relies on the "precious page" feature of Mach:
+marking a page as precious instructs Mach to always inform us when it
+evicts this page. If the page is modified, it seems that we are always
+informed; but if an unmodified page is evicted, Mach sometimes forgets
+to tell us. It's true that with a large disk cache, e.g. 512M, this
+potentially re-reads the whole cache from disk. But if we reach this
+point, the microkernel is telling us that everything has already been
+read :-)
+
+This is in preparation for the following calls to pager_return_some.
+This libpager function is called only on cached blocks that have a
+reference count of 0 -- these are the potential candidates for
+replacement; there is no sense in calling pager_return_some when the
+reference count is 1 or more. One final case is when there is no cached
+block with a reference count of 0. This is bad and we can't do anything
+about it; in this case, we just wait one second, hoping that some other
+thread will drop the reference count of a block to 0. (XXX Currently (in RC1)
+sleep(1) is always executed. It should be executed only when disk
+cache is starving. There is some rationale behind calling sleep(1) even when
+disk cache is not starving. Although pager_return_some(,,,1)
+guarantees that upon return of this function the page is returned, I'm
+not sure that it's guaranteed that pager_notify_pageout is called.
+This is because pager_return_some and
+libpager/data-return.c:_pager_do_write_request are executed in
+different threads and pager_return_some is confirmed before calling
+pager_notify_pageout. This issue is open.)
+
+So, after forcibly evicting all pages (blocks) that can potentially be
+reused, disk_cache_map is called again.
+
+In the case when a suitable place is found and all data structures
+(disk_cache_info and disk_cache_bptr) have been changed accordingly,
+pager_return_some(,,,1) is called and we wait for pager_read_page to
+clear DC_REMAPPING. The purpose of this flag (DC_REMAPPING) is solely
+to forbid any use of this block until we are absolutely sure that the
+page contains exactly the wanted block. If NDEBUG is not defined (so
+debug code is included), the flags of the blocks are checked to verify
+that DC_REMAPPING has really been cleared.
+
+Is DC_REMAPPING really needed? Is there a possibility that between the
+last "mutex_unlock (&disk_cache_lock)" and "return bptr" something could
+go wrong? Actually, the disk cache just follows the protocol set by
+pager_notify_pageout: between pager_return_some and the change of the
+internal structures for the remapping, no thread may touch the page.
+This is achieved by marking the page as DC_REMAPPING. For convenience,
+the function disk_cache_wait_remapping is defined, which waits for a
+cached block while it's marked as DC_REMAPPING.
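+
+A plausible shape for disk_cache_wait_remapping, written with the
+cthreads-style primitives the Hurd used at the time and assuming that
+disk_cache_remapping is the condition variable initialized in
+disk_cache_init (a sketch; the real function may differ in detail):
+
+  /* Wait until the given cache slot is no longer being remapped.
+     disk_cache_lock must be held on entry and is held again on
+     return.  */
+  static void
+  disk_cache_wait_remapping (int index)
+  {
+    while (disk_cache_info[index].flags & DC_REMAPPING)
+      condition_wait (&disk_cache_remapping, &disk_cache_lock);
+  }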
+
+XXX XXX: Actually, the sequence used in RC1 is: remap block and
+pager_return_some. The latter seems redundant, as only blocks that
+are evicted are candidates for remapping. I'll try to fix that for
+RC2.
+
+** Modifying blocks and pokels
+
+After a block is modified, it should be registered with pokel_add in
+some pokel. A pokel contains a list of ranges of cached blocks. All
+these blocks should have a reference count of at least 1. In pokel_flush
+and pokel_sync, this reference is consumed.
+
+So in pokel_add, if the added blocks are already in the pokel, their
+references are consumed, because only one reference is consumed in
+pokel_{sync,flush}. It is checked whether the pokel is for the
+disk_cache, because pokels are used for file access too, where the disk
+cache layer is not used.
+
+pokel_{flush,sync} both use _pokel_exec, so this is the place where
+block references are consumed. (XXX: In RC1, they are consumed
+always, but it's better to check if these pages are in disk_cache.
+Although calling disk_cache_block_deref on non-disk_cache page does no
+harm.)
+
+*** Indirect use of pokel_add
+
+record_global_poke and record_indir_poke use pokel_add indirectly.
+These functions are slightly changed to use the public interface of
+disk_cache. The only new precondition added for them is that the caller
+should supply a "reference" that will be consumed later by
+pokel_{flush,sync}.
+
+*** Modifying block without using pokels
+
+sync_global_ptr synchronizes the given block immediately. No reference
+is consumed. (XXX: This should be changed in RC2 to consume a reference.
+That would make the function similar in use to
+record_{global,indir}_poke and make the code nicer.)
+
+** Initialization
+
+*** The superblock
+
+To create the disk cache, we need the block size of the filesystem.
+This information is in the superblock, so we need to read the superblock
+without using the disk cache. For this purpose get_hypermetadata is
+changed to read the superblock with store_read instead of the old bptr.
+A new function, map_hypermetadata, is created that sets the sblock
+global variable to point to the already mapped superblock. So, to get
+the behavior of the old get_hypermetadata, first the new
+get_hypermetadata should be called, and then map_hypermetadata.
+
+In ext2fs.c:main, instead of calling get_hypermetadata,
+map_hypermetadata is called. The call to get_hypermetadata is in
+pager.c:create_disk_pager.
+
+In ext2fs.c:diskfs_reload_global_state, map_hypermetadata is called
+along with get_hypermetadata.
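+
+A simplified sketch of the new two-step setup (assuming libstore's
+store_read, the SBLOCK_OFFS/SBLOCK_SIZE constants for the usual ext2
+superblock location, and the EXT2_BLOCK_SIZE macro; error handling and
+most details are omitted):
+
+  /* get_hypermetadata: read the superblock through the store, not
+     through the disk cache, so that block_size is known before the
+     disk pager and the cache exist.  */
+  void *buf;
+  size_t len;
+  err = store_read (store, SBLOCK_OFFS / store->block_size, SBLOCK_SIZE,
+                    &buf, &len);
+  sblock = (struct ext2_super_block *) buf;
+  block_size = EXT2_BLOCK_SIZE (sblock);
+
+  /* map_hypermetadata: once the disk cache exists, make the global
+     sblock point at the mapped copy of the superblock instead.  */
+  sblock = (struct ext2_super_block *) boffs_ptr (SBLOCK_OFFS);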
+
+*** disk_cache
+
+The disk cache data structures are initialized in
+pager.c:create_disk_pager, called from ext2fs.c:main. The disk pager is
+still initialized with diskfs_start_disk_pager, but because the
+block_size variable is needed, get_hypermetadata is called here. Basic
+parameters of the disk cache, like disk_cache_blocks and
+disk_cache_size, are initialized here as well. The rest of the
+initialization process is delegated to disk_cache_init.
+
+disk_cache_init initializes the rest of the disk cache data structures:
+disk_cache_lock, disk_cache_remapping, disk_cache_bptr,
+disk_cache_info and disk_cache_hint. After that, the superblock and
+group descriptors are mapped into the cache and are marked as DC_FIXED.
+This forbids reusing those blocks, because the Hurd's ext2 code relies
+on these blocks being mapped at a fixed location in the address space.
+
+** Pager callbacks
+
+disk_pager_read_page and disk_pager_write_page just use disk cache
+data structures to get the right pointers to blocks.
+disk_pager_read_page requests notification of page-out and updates
+DC_INCORE and DC_REMAPPING too. DC_INCORE is set and DC_REMAPPING is
+cleared (because reading the new block finishes its remapping).
+
+disk_pager_notify_pageout just clears DC_INCORE, making that page
+available for remapping.
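+
+Schematically, and with the store I/O and error handling left out, the
+two callbacks manipulate the flags roughly like this (a sketch with
+simplified signatures, not the real code):
+
+  error_t
+  disk_pager_read_page (vm_offset_t page, void **buf, int *writelock)
+  {
+    int index = page / block_size;
+
+    mutex_lock (&disk_cache_lock);
+    /* ... read disk_cache_info[index].block from the store into *buf,
+       and request notification when this page is paged out ...  */
+    disk_cache_info[index].flags |= DC_INCORE;
+    disk_cache_info[index].flags &= ~DC_REMAPPING;  /* remapping done */
+    mutex_unlock (&disk_cache_lock);
+
+    *writelock = 0;
+    return 0;
+  }
+
+  void
+  disk_pager_notify_pageout (vm_offset_t page)
+  {
+    mutex_lock (&disk_cache_lock);
+    disk_cache_info[page / block_size].flags &= ~DC_INCORE;
+    mutex_unlock (&disk_cache_lock);
+  }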
+
+* libpager changes
+
+Here the memory_object_data_ prefix is shortened to m_o_d_. And when an
+m_o_d_* Mach function is talked about, usually its libpager handler is
+meant.
+
+** Notification on eviction
+
+The most important change wanted from libpager is support for
+notification when a page is evicted. Mach already has partial support
+for eviction notification via the "kcopy" argument of m_o_d_return: if
+kcopy is 0, then Mach doesn't have a copy of this page anymore, so the
+page is "evicted". The problem is that m_o_d_return is usually called
+only when a page is modified; if it's not modified, the page is silently
+dropped.
+
+The solution is marking the page as "precious". This has exactly the
+semantics we need: when the page is evicted, the m_o_d_return callback
+is always called with kcopy=0.
+
+*** Implementation details
+
+A new argument is added to the user callback pager_read_page:
+notify_on_pageout. If it's non-zero and the page is evicted, the user
+callback pager_notify_pageout(pager, page) is called. This changes the
+ABI and requires all libpager clients in the Hurd to be adapted to the
+new API.
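+
+In terms of prototypes, the changed libpager API described here looks
+roughly as follows (reconstructed from this text; in particular,
+whether notify_on_pageout is an out-parameter or takes some other form
+is an assumption):
+
+  /* User-supplied callback: as before, but additionally set
+     *NOTIFY_ON_PAGEOUT nonzero to be told when the page is evicted.  */
+  error_t pager_read_page (struct user_pager_info *pager,
+                           vm_offset_t page, vm_address_t *buf,
+                           int *write_lock, int *notify_on_pageout);
+
+  /* New user-supplied callback: called when a page that asked for
+     notification is evicted (m_o_d_return with kcopy == 0).  */
+  void pager_notify_pageout (struct user_pager_info *pager,
+                             vm_offset_t page);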
+
+m_o_d_request stores notify_on_pageout as flag PM_NOTIFY_PAGEOUT.
+
+m_o_d_return no longer just skips non-dirty pages. A local array
+notified[] is built, and at the end of the function
+pager_notify_pageout is called for all pages that were evicted
+(kcopy=0).
+
+** Avoiding libpager optimization
+
+Unfortunately, there is one more problem, this time specific to
+libpager, not Mach. There is an optimization in m_o_d_request for when
+a page is being paged out. At the beginning of m_o_d_return, all pages
+being returned are marked as PM_PAGINGOUT. This mark is cleared after
+m_o_d_supply (which supplies the page contents to Mach) is called. If
+m_o_d_request is called on a page that is marked as PM_PAGINGOUT, this
+page is marked with PM_PAGEINWAIT, and m_o_d_supply is not called for
+this page from the m_o_d_request handler (the m_o_d_return handler will
+call it instead). This is possible because neither of these functions
+holds pager->interlock during its whole execution: the lock is
+temporarily released during calls to the user callbacks pager_read_page
+and pager_write_page.
+
+So what does this optimization imply for our page eviction
+notification? When a page is paged out, we get notified and we may
+decide to reuse it. After rearranging disk_cache_info, etc., the page is
+touched -- but if this happens fast enough, the optimization is
+triggered and we get the old content! Reading the page is "optimized":
+pager_read_page is not called, and the content of the old block is used
+instead.
+
+This is solved by marking flushed and synced pages (via
+pager_{flush,sync}{,_some}) with PM_FORCEREAD. (These functions call
+lock-object.c:_pager_lock_object, which marks pages with PM_FORCEREAD
+if they are already marked with PM_NOTIFY_PAGEOUT.) When handling
+m_o_d_request, pages marked as PM_FORCEREAD are not optimized in this
+way. XXX: Currently, this fine-grained logic is disabled (with #if),
+as it needs more testing. Probably RC2 will use it. For now, all
+pages are considered PM_FORCEREAD and this particular optimization
+never happens.
+
+*** Technical details
+
+As said above, we need a guarantee that after pager_{sync,flush}*, the
+pager_read_page callback is called. The most convenient place to mark
+these pages as forced to be re-read is
+lock-object.c:_pager_lock_object, because this function is used by all
+pager_{sync,flush}* functions. So there we just mark the page as
+PM_FORCEREAD if it's already marked as PM_NOTIFY_PAGEOUT.
+
+First, this mark influences the behaviour of m_o_d_request. If a page
+is marked with PM_FORCEREAD and PM_PAGINGOUT, then we set PM_PAGEINWAIT
+and wait until the related m_o_d_return finishes (unmarks
+PM_PAGEINWAIT). Then we continue with pager_read_page, etc. If a page
+is not marked with PM_FORCEREAD but is marked with PM_PAGINGOUT, then
+the old logic is used and pager_read_page is not called (because the
+m_o_d_return handler will call m_o_d_supply instead of us). (XXX:
+Again, this logic is inside #if 0. Currently, all pages are considered
+as marked with PM_FORCEREAD.)
+
+The other place where PM_FORCEREAD is taken into consideration is the
+handler of m_o_d_return. The original code checks whether a page is
+marked with PM_PAGEINWAIT, and if it is, m_o_d_supply is called for the
+just written page. PM_PAGEINWAIT is used as a "delegator" of the
+m_o_d_supply call to Mach.
+
+In the patched libpager, there is one more condition for when to call
+m_o_d_supply. It's called when the page is marked as PM_PAGEINWAIT and
+not marked as PM_FORCEREAD. If it's marked as PM_FORCEREAD, then we
+leave m_o_d_supply to the m_o_d_request handler, which gets notified via
+the condition pager->wakeup.
diff --git a/hurd/translator/ext2fs/ogi-fosdem2005.mgp b/hurd/translator/ext2fs/ogi-fosdem2005.mgp
new file mode 100644
index 00000000..27b5077c
--- /dev/null
+++ b/hurd/translator/ext2fs/ogi-fosdem2005.mgp
@@ -0,0 +1,165 @@
+# "Supporting Larger ext2 File Systems in the Hurd"
+# Written by Ognyan Kulev for presentation at FOSDEM 2005.
+# Content of this file is in public domain.
+%include "default.mgp"
+%page
+%nodefault
+%center, font "thick", size 5
+
+
+
+
+Supporting Larger ext2 File Systems in the Hurd
+
+
+
+%font "standard", size 4
+Ognyan Kulev
+%size 3
+<ogi@fmi.uni-sofia.bg>
+
+
+%size 4
+FOSDEM 2005
+
+%page
+
+Need for supporting larger file systems
+
+ Active development during 1995-1997
+
+ Hurd 0.2 was released in 1997 and it was very buggy
+
+ Many bugs are fixed since then
+
+ The 2G limit for ext2 file systems becomes more and more annoying
+
+%page
+
+Timeline
+
+ 2002: Time for graduating; fixing the 2G limit in Hurd's ext2fs and implementing ext3fs were chosen as the MSc thesis topic
+
+ 2003: First alpha-quality patch
+
+ 2004: Graduation, ext2fs patch in Debian, but ext3fs is unstable
+
+%page
+
+User pager in GNU Mach
+
+ Address space
+ memory_object_data_supply
+ memory_object_data_return
+ Memory object (Mach concept)
+ pager_read_page
+ pager_write_page
+ User-supplied backstore (libpager concept)
+
+%page
+
+Current ext2fs
+
+ Memory mapping of the whole store
+
+ Applies only for metadata!
+
+ bptr (block -> data pointer)
+ = image pointer + block * block_size
+
+ Inode and group descriptor tables are used as if they were contiguous in memory
+
+%page
+
+Patched ext2fs, part one
+
+ Address space region
+ mapping
+ Array of buffers
+ association
+ Store
+
+ Association of buffers changes (reassociation)
+
+ It's important for reassociation to occur on buffers that are not in core
+
+%page
+
+Patched ext2fs, part two
+
+ Always use buffer guarded by
+ disk_cache_block_ref (block -> buffer)
+ disk_cache_block_deref (release buffer)
+
+ Buffer = data + reference count + flags (e.g. INCORE)
+
+ Calling some functions implies releasing buffer:
+ pokel_add (pokels are list of dirty buffers)
+ record_global_poke (use pokel_add)
+ sync_global_ptr (sync immediately)
+ record_indir_poke (use pokel_add)
+
+ Use ihash for mapping block to buffer
+
+%page
+
+When unassociated block is requested
+
+
+%font "typewriter", size 4, cont
+retry:
+ i = hint;
+ while (buffers[i] is referenced or in core) {
+ i = (i + 1) % nbuffers;
+ if (i == hint) {
+ return_unreferenced_buffers ();
+ goto retry;
+ }
+ }
+ hint = i + 1;
+
+ deassociate (buffers[i]);
+ associate (buffers[i], block);
+
+ return buffers[i];
+
+%page
+
+Notification for evicted pages
+
+ Notification is essential for optimal reassociation
+
+ Precious pages in Mach
+
+ Slight change to API and ABI of libpager is required
+
+ Mach sometimes doesn't notify!
+
+%page
+
+Pager optimization
+
+1. Mach returns page to pager without leaving it in core
+
+2. Pager becomes unlocked because of calling callback pager_write_page
+
+3. User task touches the page
+
+4. Mach requests the same page from pager
+
+5. XXX Pager supplies the page that was returned by Mach, instead of calling callback pager_read_page
+
+%page
+
+Future directions
+
+ Committing in the Hurd :-)
+ Block sizes of 1K and 2K
+ Run-time option for buffer array size (?)
+ Compile-time option for memory-mapping the whole store
+ Upgrade of UFS
+ Extended attributes (EAs) and Access control lists (ACLs)
+
+# Local Variables:
+# mgp-options: "-g 640x480"
+# End:
diff --git a/hurd/translator/fatfs.mdwn b/hurd/translator/fatfs.mdwn
new file mode 100644
index 00000000..006fac0b
--- /dev/null
+++ b/hurd/translator/fatfs.mdwn
@@ -0,0 +1,13 @@
+[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+The current `fatfs` translator is read-only, and it has a severe bug:
+[[!GNU_Savannah_bug 25961]].
diff --git a/hurd/translator/gopherfs.mdwn b/hurd/translator/gopherfs.mdwn
new file mode 100644
index 00000000..6c32430f
--- /dev/null
+++ b/hurd/translator/gopherfs.mdwn
@@ -0,0 +1,16 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+`gopherfs` is a virtual filesystem allowing you to access Gopher sites.
+
+
+# Source
+
+incubator, gopherfs/master
diff --git a/hurd/translator/hostmux.mdwn b/hurd/translator/hostmux.mdwn
new file mode 100644
index 00000000..5fab2dc5
--- /dev/null
+++ b/hurd/translator/hostmux.mdwn
@@ -0,0 +1,31 @@
+Multiplexes arbitrary host names, making access to many different hosts fast and easy.
+
+For each host accessed via a directory, a new translator is started with the hostname as an option. Say /hostmuxdemo is meant to let you access your favourite hosts with your translator mytranslatorfs.
+
+<code>**ls /hostmuxdemo/mybox/**</code> would give the result of mytranslatorfs applied to host mybox.
+
+## <a name="Usage"> Usage </a>
+
+Hostmux takes a translator and its options as arguments and (in the easiest case) starts that translator with the given arguments and the hostname as the last argument.
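+
+For the introductory example above (where `/hostmuxdemo` and `mytranslatorfs`
+are placeholders), the setup would look roughly like this:
+
+    $ settrans -ca /hostmuxdemo /hurd/hostmux /hurd/mytranslatorfs
+    $ ls /hostmuxdemo/mybox/   # behind the scenes: /hurd/mytranslatorfs mybox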
+
+### <a name="ftpfs"> ftpfs </a>
+
+ftpfs is a good and even very useful example. With hostmux and ftpfs you can access anonymous FTP via the filesystem, sparing you the more complicated use of an FTP client.
+
+We assume you want to access the FTP root on all servers. The example host is ftp.yourbox.com.
+
+Hostmux is called via <code>**settrans -fgap /ftp /hurd/hostmux /hurd/ftpfs /**</code>.
+
+* <code>**-fg**</code> makes settrans try hard to remove an existing old translator from <code>**/ftp**</code>
+* <code>**ap**</code> sets an active translator (starts the translator) and a passive translator (stores the translator information in the filesystem so that an active translator can be started when this node is accessed)
+* <code>**/ftp**</code> is where we want to set the translator
+* <code>**/hurd/hostmux**</code> is obviously our hostmux translator that will be started at <code>**/ftp**</code> and handle filesystem operations on <code>**/ftp**</code> and everything below (like <code>**/ftp/ftp.yourbox.com/pub/**</code>)
+* <code>**/hurd/ftpfs /**</code> is the argument to hostmux.
+
+When <code>**/ftp**</code> is accessed, the first directory component is interpreted as a hostname and a new translator is set up with the <code>**hostmux**</code> arguments:
+
+<code>**ls /ftp/ftp.yourhost.com/pub/**</code> lets hostmux start a new translator <code>**/hurd/ftpfs / ftp.yourhost.com**</code> and serve it via the directory <code>**/ftp/ftp.yourhost.com/**</code>. Subsequently, the directory <code>**pub/**</code> on <code>**ftp.yourhost.com**</code> can be accessed via the newly created translator.
+
+You can see the newly created translator in the process list: <code>**ps ax | grep ftpfs**</code>. You should see <code>**/hurd/ftpfs / ftp.yourhost.com**</code>.
+
+-- [[Main/PatrickStrasser]] - 13 Jul 2004
diff --git a/hurd/translator/magic.mdwn b/hurd/translator/magic.mdwn
new file mode 100644
index 00000000..06ee798b
--- /dev/null
+++ b/hurd/translator/magic.mdwn
@@ -0,0 +1,21 @@
+[[!meta copyright="Copyright © 2006, 2007, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+The magic translator provides `/dev/fd`.
+
+ $ showtrans /dev/fd
+ /hurd/magic --directory fd
+
+The `/dev/fd` directory holds the open file descriptors for your current
+process. You can't see them with `ls -l /dev/fd/` but you can see them
+individually like this:
+
+ $ ls -l /dev/fd/0
+ crw--w---- 1 bing tty 0, 0 Nov 19 18:00 /dev/fd/0
diff --git a/hurd/translator/mboxfs.mdwn b/hurd/translator/mboxfs.mdwn
new file mode 100644
index 00000000..e357294f
--- /dev/null
+++ b/hurd/translator/mboxfs.mdwn
@@ -0,0 +1,11 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+<http://www.nongnu.org/hurdextras/#mboxfs>
diff --git a/hurd/translator/netio.mdwn b/hurd/translator/netio.mdwn
new file mode 100644
index 00000000..aca9cd69
--- /dev/null
+++ b/hurd/translator/netio.mdwn
@@ -0,0 +1,17 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+`netio` is a translator designed for creating socket ports through the
+filesystem.
+
+
+# Source
+
+incubator, netio/master
diff --git a/hurd/translator/nsmux.mdwn b/hurd/translator/nsmux.mdwn
new file mode 100644
index 00000000..d156772b
--- /dev/null
+++ b/hurd/translator/nsmux.mdwn
@@ -0,0 +1,121 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+# nsmux
+
+`nsmux` implements the simplest use-case of namespace-based translator
+selection (see below).
+
+To use `nsmux` do the following:
+
+ $ settrans -a <node> nsmux <directory>
+
+After this operation `<node>` will be a mirror of `<directory>` with
+namespace-based translator selection functionality enabled.
+
+Please note that, due to some implementation details, `nsmux` may complain a
+lot when run as a normal user. Fixing this is the most urgent item on the
+TODO list.
+
+## Source
+
+The `nsmux` translator can be obtained with the following series of
+commands:
+
+ $ git clone git://git.sv.gnu.org/hurd/incubator.git nsmux
+ $ cd nsmux/
+ $ git checkout -b nsmux origin/nsmux
+
+The `filter` translator can be obtained with the following series of
+commands:
+
+ $ git clone git://git.sv.gnu.org/hurd/incubator.git filter
+ $ cd filter/
+ $ git checkout -b filter origin/filter
+
+The filter is not yet working.
+
+## Namespace-based Translator Selection
+
+Namespace-based translator selection is the special technique of using
+"magic" filenames for both accessing the file and setting translators
+on it.
+
+A "magic" filename is a filename which contains an unescaped sequence
+of two commas: ",,". This sequence can be escaped by adding another
+comma: ",,,". In the magic filename the part up to the first double
+commas is interpreted as the filename itself; the remaining segments
+into which the string is split by occurrences of ",," are treated as
+names of translators located under `/hurd/`.
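+
+For example (`u2g` is just a hypothetical translator name here; any program
+installed under `/hurd/` can be named this way):
+
+    $ cat notes,,u2g    # read "notes" through the dynamic translator /hurd/u2g
+    $ cat a,,,b         # escaped commas: an ordinary lookup, no translator requested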
+
+The simplest advantage over the traditional way of setting
+translators is shown in the following examples. Compare this
+
+ $ settrans -a file translator1
+ $ settrans -a file translator2
+ $ cat file
+
+to this:
+
+ $ cat file,,translator1,,translator2
+
+One simple command versus three more lengthy ones is an obvious
+improvement. However, this advantage is not the only one and,
+probably, not even the most important.
+
+Probably the most important advantage, though, is that
+translators requested via "magic" filenames are session-bound. In
+other words, by running `cat file,,translator` we set a translator
+visible *only* to `cat`, while the original file remains untranslated.
+Such session-specific translators are called **dynamic** and there is
+no (theoretical) way for a client to get a port to a dynamic
+translator requested by another client.
+
+Obviously, dynamic translators can be stacked, similarly to static
+translators. Also, dynamic translator stacks may reside on top of
+static translator stacks.
+
+An important operation of namespace-based translator selection is
+*filtering*. Filtering basically consists in looking up a translator
+by name in the stack and ignoring translators located on top of it.
+Note that filtering does not mean dropping some translators: in the
+current implementation a filter is expected to be a normal dynamic
+translator, included in the dynamic translator stack similarly to
+other translators.
+
+An important detail is that filtering is not limited to dynamic
+translator stacks: a filter should be able to descend into static
+translator stacks as well.
+
+Although the concept of filtering may seem purely abstract in the
+simplest use-case of setting dynamic translators on top of files, the
+situation changes greatly when dynamic translator stacks on top of
+directories are considered. In this case, the implementation of
+namespace-based translator selection is expected to be able to
+propagate the dynamic translators associated with the directory down
+the directory structure. That is, all files located under a directory
+opened with the magic syntax are expected to be translated by the same
+set of translators. In this case having the possibility to
+specifically discard some of the translators set up on top of certain
+files is very useful.
+
+Note that the implementation of propagation of dynamic translators
+down directories is not fully conceived at the moment. The
+fundamental problem is distinguishing between situations when the
+dynamic translators are to be set on the underlying files of the
+directory or on the directory itself.
+
+## Currently Implemented
+
+Currently there is a working (though not heavily tested) implementation
+of the simplest use-case of namespace-based translator selection in
+the form of the translator `nsmux`. The filter is partially implemented;
+completing it is the immediate goal. Propagating translators down
+directories is the next objective.
diff --git a/hurd/translator/pfinet.mdwn b/hurd/translator/pfinet.mdwn
new file mode 100644
index 00000000..cbe50b48
--- /dev/null
+++ b/hurd/translator/pfinet.mdwn
@@ -0,0 +1,35 @@
+[[!meta copyright="Copyright © 2002, 2004, 2005, 2007, 2008 Free Software
+Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+To configure Internet connectivity, the `pfinet` (*Protocol Family Internet*)
+[[translator]] must be configured. This is done using the
+[[`settrans`|settrans]] command, for example like this:
+
+ # settrans -fgap /servers/socket/2 /hurd/pfinet ↩
+ -i eth0 -a 192.168.0.50 -g 192.168.0.1 -m 255.255.255.0
+
+The argument `/servers/socket/2` is the node that the translator is to be
+attached to. This is followed by the translator program to run and any
+arguments to give it.
+
+Here, `-i`, `-a`, `-g` and `-m` specify the (Mach) network device to
+use, the IP address, the gateway and the netmask, respectively.
+
+---
+
+To make DNS lookups work, you'll also have to properly configure the
+`/etc/resolv.conf` file, for example by copying it over from your GNU/Linux
+installation.
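+
+If you write `/etc/resolv.conf` by hand instead, a minimal version just names
+one or more DNS servers; the address below is a placeholder for your own
+resolver:
+
+    nameserver 192.168.0.1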
+
+---
+
+ * [[Implementation]].
+ * [[IPv6]].
diff --git a/hurd/translator/pfinet/implementation.mdwn b/hurd/translator/pfinet/implementation.mdwn
new file mode 100644
index 00000000..50b5dfc2
--- /dev/null
+++ b/hurd/translator/pfinet/implementation.mdwn
@@ -0,0 +1,13 @@
+[[!meta copyright="Copyright © 2000, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+The `pfinet` server is a hacked Linux internet implementation with a glue layer
+translating between the Hurd [[RPC]]s and the middle layer of the Linux
+implementation.
diff --git a/hurd/translator/pfinet/ipv6.mdwn b/hurd/translator/pfinet/ipv6.mdwn
new file mode 100644
index 00000000..5afee0c6
--- /dev/null
+++ b/hurd/translator/pfinet/ipv6.mdwn
@@ -0,0 +1,57 @@
+[[!meta copyright="Copyright © 2007, 2008, 2010 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+[[Stefan_Siegl|stesie]] has added IPv6 support to the pfinet [[translator]].
+This was [Savannah task #5470](http://savannah.gnu.org/task/?5470).
+
+
+# Implementation
+
+Because the IPv4 and IPv6 protocols are
+quite related to each other (think of mapped IPv4 addresses, etc.), there is no
+separate [[server|translator]] for IPv6 but support for the latter has been
+incorporated into the common pfinet. Unfortunately it's a little bit clumsy
+now to set the [[translator]] up, since it has to be bound to
+*/servers/socket/2* (like before) as well as */servers/socket/26* (for IPv6).
+
+To achieve this, you can tell pfinet to install [[active_translators|active]]
+on specified nodes, using the **-4** and **-6** options. That is, you have to
+install a [[passive_translator|passive]] on */servers/socket/2* that also binds
+the IPv6 port and vice versa.
+
+
+# Examples
+
+Normal IPv4 network setup, address 192.168.7.23/24 and gateway 192.168.7.1.
+IPv6 address shall be assigned using IPv6 auto-configuration.
+
+ settrans -fgp /servers/socket/2 ↩
+ /hurd/pfinet -6 /servers/socket/26 ↩
+ -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1
+ settrans -fgp /servers/socket/26 ↩
+ /hurd/pfinet -4 /servers/socket/2 ↩
+ -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1
+
+Quite the same, but with static IPv6 address assignment:
+
+ settrans -fgp /servers/socket/2 ↩
+ /hurd/pfinet -6 /servers/socket/26 ↩
+ -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1 ↩
+ -A 2001:4b88:10e4:0:216:3eff:feff:4223/64 -G 2001:4b88:10e4::1
+ settrans -fgp /servers/socket/26 ↩
+ /hurd/pfinet -4 /servers/socket/2 ↩
+ -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1 ↩
+ -A 2001:4b88:10e4:0:216:3eff:feff:4223/64 -G 2001:4b88:10e4::1
+
+
+# Missing Functionality
+
+Amongst other things, support for [[IOCTL]]s is missing.
diff --git a/hurd/translator/pflocal.mdwn b/hurd/translator/pflocal.mdwn
new file mode 100644
index 00000000..dc2434dc
--- /dev/null
+++ b/hurd/translator/pflocal.mdwn
@@ -0,0 +1,13 @@
+[[!meta copyright="Copyright © 2000, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+The implementation of the `pflocal` server is in the `pflocal` directory, and
+uses [[`libpipe`|libpipe]] (shared code with the [[named_pipe|fifo]]
+implementation).
diff --git a/hurd/translator/procfs.mdwn b/hurd/translator/procfs.mdwn
new file mode 100644
index 00000000..404a6764
--- /dev/null
+++ b/hurd/translator/procfs.mdwn
@@ -0,0 +1,19 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+<http://www.nongnu.org/hurdextras/#procfs>
+
+ * [[`ps`|procps]]
+ * [[`top`|top]]
+ * [[`htop`|htop]]
+ * `gtop`
+ * [[`killall`|killall]]
+ * `pkill`
+ * ...
diff --git a/hurd/translator/procfs/htop.mdwn b/hurd/translator/procfs/htop.mdwn
new file mode 100644
index 00000000..ce38b92c
--- /dev/null
+++ b/hurd/translator/procfs/htop.mdwn
@@ -0,0 +1,25 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+ open("/proc/stat", O_RDONLY) = 3
+ open("/proc/meminfo", O_RDONLY) = 3
+ open("/proc/stat", O_RDONLY) = 3
+ open("/proc", O_RDONLY|O_NONBLOCK|O_LARGEFILE|O_DIRECTORY|0x80000) = 3
+ open("/proc/1/task", O_RDONLY|O_NONBLOCK|O_LARGEFILE|O_DIRECTORY|0x80000) = 4
+ open("/proc/1/status", O_RDONLY) = 4
+ open("/proc/1/statm", O_RDONLY) = 4
+ open("/proc/1/stat", O_RDONLY) = 4
+ open("/proc/1/cmdline", O_RDONLY) = 4
+ open("/proc/2/task", O_RDONLY|O_NONBLOCK|O_LARGEFILE|O_DIRECTORY|0x80000) = 4
+ open("/proc/2/status", O_RDONLY) = 4
+ open("/proc/2/statm", O_RDONLY) = 4
+ open("/proc/2/stat", O_RDONLY) = 4
+ open("/proc/2/cmdline", O_RDONLY) = 4
+ [...]
diff --git a/hurd/translator/procfs/killall.mdwn b/hurd/translator/procfs/killall.mdwn
new file mode 100644
index 00000000..3d31b51a
--- /dev/null
+++ b/hurd/translator/procfs/killall.mdwn
@@ -0,0 +1,23 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+ open("/proc/stat", O_RDONLY) = 3
+ open("/proc/self/stat", O_RDONLY) = 3
+ open("/proc/uptime", O_RDONLY) = 3
+ open("/proc/sys/kernel/pid_max", O_RDONLY) = 4
+ open("/proc/meminfo", O_RDONLY) = 4
+ open("/proc", O_RDONLY|O_NONBLOCK|O_LARGEFILE|O_DIRECTORY|0x80000) = 5
+ open("/proc/1/stat", O_RDONLY) = 6
+ open("/proc/1/status", O_RDONLY) = 6
+ open("/proc/1/cmdline", O_RDONLY) = 6
+ open("/proc/2/stat", O_RDONLY) = 6
+ open("/proc/2/status", O_RDONLY) = 6
+ open("/proc/2/cmdline", O_RDONLY) = 6
+ [...]
diff --git a/hurd/translator/procfs/procps.mdwn b/hurd/translator/procfs/procps.mdwn
new file mode 100644
index 00000000..3d31b51a
--- /dev/null
+++ b/hurd/translator/procfs/procps.mdwn
@@ -0,0 +1,23 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+ open("/proc/stat", O_RDONLY) = 3
+ open("/proc/self/stat", O_RDONLY) = 3
+ open("/proc/uptime", O_RDONLY) = 3
+ open("/proc/sys/kernel/pid_max", O_RDONLY) = 4
+ open("/proc/meminfo", O_RDONLY) = 4
+ open("/proc", O_RDONLY|O_NONBLOCK|O_LARGEFILE|O_DIRECTORY|0x80000) = 5
+ open("/proc/1/stat", O_RDONLY) = 6
+ open("/proc/1/status", O_RDONLY) = 6
+ open("/proc/1/cmdline", O_RDONLY) = 6
+ open("/proc/2/stat", O_RDONLY) = 6
+ open("/proc/2/status", O_RDONLY) = 6
+ open("/proc/2/cmdline", O_RDONLY) = 6
+ [...]
diff --git a/hurd/translator/procfs/top.mdwn b/hurd/translator/procfs/top.mdwn
new file mode 100644
index 00000000..2cba78ad
--- /dev/null
+++ b/hurd/translator/procfs/top.mdwn
@@ -0,0 +1,18 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+ open("/proc/stat", O_RDONLY) = 3
+ open("/proc/sys/kernel/pid_max", O_RDONLY) = 3
+ open("/proc", O_RDONLY|O_NONBLOCK|O_LARGEFILE|O_DIRECTORY|0x80000) = 3
+ open("/proc/1/stat", O_RDONLY) = 4
+ open("/proc/1/statm", O_RDONLY) = 4
+ open("/proc/2/stat", O_RDONLY) = 4
+ open("/proc/2/statm", O_RDONLY) = 4
+ [...]
diff --git a/hurd/translator/random.mdwn b/hurd/translator/random.mdwn
new file mode 100644
index 00000000..afb76953
--- /dev/null
+++ b/hurd/translator/random.mdwn
@@ -0,0 +1,70 @@
+[Savannah task #5130: random translator](http://savannah.gnu.org/task/?5130)
+
+See the attached [[mbox.bz2]] containing all the emails concerning this topic
+which I was able to gather from public archives. (!) This is not up-to-date
+anymore, as [[MichaelCasadevall]] is currently working on this.
+
+# Description
+
+Sources of entropy are, for example, disk access latencies, keystroke timing
+patterns, or network behavior. This suggests that for implementing a random
+translator a kernel part is needed as well, to gather that entropy. That
+kernel part would then export the gathered entropy via a kernel device, named
+perhaps `entropy`.
+
+# Setup Pseudo Random Devices
+
+Stuck getting SSH to work? You need a pseudo-random number generator (PRNG).
+
+There are several solutions to the lack of `/dev/random` and `/dev/urandom`,
+but they are not yet in the default installation.
+
+* Marcus' work can be downloaded at
+ [random.tar.gz](ftp://alpha.gnu.org/gnu/hurd/contrib/marcus/random.tar.gz).
+ (Identical to <http://kilobug.free.fr/hurd/random-64.tar.gz>?)
+ * [A patch](http://mail.gnu.org/pipermail/bug-hurd/2002-August/010248.html)
+    from August 14, 2002, that has probably already been incorporated.
+ * Clemmitt Sigler [reported
+ success](http://lists.gnu.org/archive/html/help-hurd/2002-10/msg00076.html)
+ October 11, 2002 and Marcus [described
+ some](http://lists.gnu.org/archive/html/help-hurd/2002-10/msg00081.html) of
+ the internals.
+
+* [Entropy Gathering Daemon](http://egd.sourceforge.net/).
+ * [request for packaging](http://bugs.debian.org/145498).
+
+* [OSKit Entropy
+ Patch](http://lists.gnu.org/archive/html/bug-hurd/2003-01/msg00000.html) from
+ Derek Davies - Jan 2003.
+ * See also [this page](http://www.ddavies.net/oskit-entropy/).
+ * Note that this patch can (and should) be used with this [OSKit NIC
+ patch](ftp://flux.cs.utah.edu/flux/oskit/mail/html/oskit-users/msg01570.html).
+
+* [Sune Kirkeby's incomplete port of the Linux /dev/\{,u\}random device
+ driver](http://ibofobi.dk/stuff/hurd-entropy/)
+ * [The files](http://download.ibofobi.dk/hurd-entropy/), including a [patch
+ for GNU
+ Mach](http://download.ibofobi.dk/hurd-entropy/gnumach-entropy.diff.bz2).
+
+* Quick and dirty way:
+
+ sudo cp /bin/bash /dev/random
+ sudo ln -s random /dev/urandom
+
+---
+
+# Setup Tips
+
+Here are some tips on how to actually set up the two random devices using
+Kilobug's [random-64 server](http://kilobug.free.fr/hurd/random-64.tar.gz).
+His tarball is a complete Hurd server including a pre-built binary - so you
+don't need GCC or magic fingers for this! :)
+
+After untarring the package, copy the random binary to the `/hurd`
+directory. Then set up the translators for random and urandom.
+
+ # settrans -c /dev/random /hurd/random \
+ --seed-file /var/run/random-seed --secure
+ # settrans -c /dev/urandom /hurd/random \
+ --seed-file /var/run/urandom-seed --fast
+ # chmod 0644 /dev/random /dev/urandom
diff --git a/hurd/translator/random/mbox.bz2 b/hurd/translator/random/mbox.bz2
new file mode 100644
index 00000000..a9a4d4a6
--- /dev/null
+++ b/hurd/translator/random/mbox.bz2
Binary files differ
diff --git a/hurd/translator/short-circuiting.mdwn b/hurd/translator/short-circuiting.mdwn
new file mode 100644
index 00000000..9de9f7b8
--- /dev/null
+++ b/hurd/translator/short-circuiting.mdwn
@@ -0,0 +1,88 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+In traditional [[Unix]], file systems contain special files. These are:
+symbolic links, character devices, block devices, named pipes, and
+named sockets. Naturally the Hurd also supports these.
+
+However, if you take a look at `hurd/io.defs` and `hurd/fs.defs`, you'll
+find that there are no [[RPC]]s that deal specifically with these types.
+Sure, you can get the type of the file through `io_stat` (among other
+things), but there is none that, e.g., lets you create a symbolic link.
+
+If you take a look at how [[glibc]] implements `symlink`, you'll notice
+that all it does is create a new file and set its passive translator to
+`/hurd/symlink DEST`. You can verify this yourself by creating a symlink
+with `ln -s foo bar` and printing its passive translator setting with `showtrans
+bar`.
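+
+For example (the output shown is what one would expect on an `ext2fs`-backed
+file system; exact formatting may vary):
+
+    $ ln -s foo bar
+    $ showtrans bar
+    /hurd/symlink foo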
+
+This is how the other special files are implemented as well. The header
+`hurd/paths.h` contains a list of paths that are used to implement
+special files:
+
+ * `/hurd/symlink`
+ * `/hurd/chrdev`
+ * `/hurd/blkdev`
+ * `/hurd/fifo`
+ * `/hurd/ifsock`
+
+So all special files are implemented through special-purpose translators,
+right? Not quite: instead, the translators on this list are often
+implemented by the underlying filesystem through *translator
+short-circuiting*. In fact, `chrdev` and `blkdev` aren't even implemented
+as translators at all.
+
+Translator short-circuiting is when a file system server implements the
+functionality of a passive translator itself, instead of actually starting
+it. For instance, all the [[`symlink`|symlink]] translator does is return
+a `FS_RETRY_*` reply to the caller. So instead of starting it, the file
+system server can simply continue the file name look-up internally by
+appending the target of the symbolic link to the path being looked-up.
+
+This way, we can skip starting the `symlink` translator, skip retrying
+the look-up on the newly started translator, and we might also skip a
+retry to the same file system server again, if the target of the symbolic
+link is in it.
+
+In fact, the translators from the list that actually are implemented
+(`symlink`, `fifo`, `ifsock`) are only used as a default implementation if the
+underlying file system does not implement the functionality itself, i.e., if
+it doesn't short-circuit it.
+
+To make sure that you use one of these translators, thereby bypassing the
+short-circuiting mechanism, you can either start it as
+an active translator, or use a different path from the one in
+`hurd/paths.h`, e.g. `settrans bar /hurd/./symlink foo`.
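+
+Spelled out, either of the following should do (file names as above; the
+`showtrans` output is what one would expect to see):
+
+    # Start the real translator actively, so it is actually run:
+    $ settrans -a bar /hurd/symlink foo
+    # Or store a passive setting whose path differs from the one in hurd/paths.h:
+    $ settrans bar /hurd/./symlink foo
+    $ showtrans bar
+    /hurd/./symlink foo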
+
+The best example of how short-circuiting is implemented can be found
+in [[`libdiskfs`|libdiskfs]]. Notice how it detects if a translator to store
+is a special file in `diskfs_S_file_set_translator` and instead
+of storing a real passive translator setting on the disk, stores it as a
+symlink node (using `diskfs_create_symlink_hook` or a generic implementation).
+
+In later look-ups to the node, it checks the node's `stat` structure in
+`diskfs_S_file_get_translator`, or
+`diskfs_S_dir_lookup` and handles special file types appropriately.
+
+Translator short-circuiting has disadvantages: code duplication, or, more
+generally, added code complexity that isn't needed for implementing the same
+functionality. But it also has advantages: it uses functionality that the file
+system's data structures already provide -- storing symbolic links
+in `ext2fs`' inodes instead of storing passive translator settings -- and thus
+stays compatible with other operating systems mounting that file system.
+
+Also, this short-circuiting conserves system resources, as it's no longer
+required to start a `symlink` translator for resolving each symbolic link, and
+it reduces the [[RPC]] overhead.
+
+It can also confuse users who expect the passive translator to actually be
+started. For instance, a user may notice that [[`symlink`|symlink]]'s code is
+lacking some functionality, and yet that functionality unexpectedly works when
+they try it.
diff --git a/hurd/translator/storeio.mdwn b/hurd/translator/storeio.mdwn
new file mode 100644
index 00000000..8e26a959
--- /dev/null
+++ b/hurd/translator/storeio.mdwn
@@ -0,0 +1,30 @@
+[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+`storeio` is a *translator for devices and other stores*.
+
+It relies heavily on [[libstore]].
+
+
+# Examples
+
+You can make a file's content available as some block device (where `foo` is
+the name of the file to map):
+
+ settrans -ca node /hurd/storeio -T file foo
+
+You can even `ungzip` files on the fly (`bunzip2` is available as well):
+
+ settrans -ca node /hurd/storeio -T gunzip foo.gz
+
+You can use the *typed store*, to create filter chains (of course this example
+is kind of useless since you could use the `gunzip` store directly):
+
+ settrans -ca node /hurd/storeio -T typed gunzip:file:foo.gz
diff --git a/hurd/translator/stowfs.mdwn b/hurd/translator/stowfs.mdwn
new file mode 100644
index 00000000..9c88f1a3
--- /dev/null
+++ b/hurd/translator/stowfs.mdwn
@@ -0,0 +1,11 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+[[!meta redir=unionfs#stowfs]]
diff --git a/hurd/translator/tarfs.mdwn b/hurd/translator/tarfs.mdwn
new file mode 100644
index 00000000..e25e3255
--- /dev/null
+++ b/hurd/translator/tarfs.mdwn
@@ -0,0 +1,25 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+`tarfs` is a translator aimed at providing access to tar files through the
+filesystem. This way you don't have to extract files from the archive to
+access them. It supports compressed archives (bzip2 and gzip) through
+[[libstore]].
+
+
+# Status
+
+Works fine in most cases; occasional corruption occurs when writing using
+bzip2/gzip stores.
+
+
+# Source
+
+incubator, tarfs/master
diff --git a/hurd/translator/tmpfs.mdwn b/hurd/translator/tmpfs.mdwn
new file mode 100644
index 00000000..0179ad6c
--- /dev/null
+++ b/hurd/translator/tmpfs.mdwn
@@ -0,0 +1,29 @@
+[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+`tmpfs` is a file system server for temporary data storage without using a real
+(permanent) [[backing_store]].
+
+It is based on [[libdiskfs]].
+
+Even though there are other possibilities, such as creating a
+[[libstore/examples/ramdisk]] and running a regular file system, for example
+[[`ext2`|ext2fs]], on it, having a real `tmpfs` is better, as it need not deal
+with the additional block-level indirection layer that `ext2` (or any other
+disk-based file system) imposes.
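+
+Basic usage follows the usual translator pattern, with the maximum size of the
+file system given as an argument (this transcript is taken from one of the
+discussion logs linked below; see also the caveat that follows):
+
+    $ settrans -a tmp /hurd/tmpfs 10M
+    $ echo "foo" > tmp/bar
+    $ cat tmp/bar
+    foo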
+
+However, `tmpfs` is not working correctly at the moment:
+
+[[!inline
+pages="hurd/translator/tmpfs/*"
+show=0
+feeds=no
+actions=yes]]
diff --git a/hurd/translator/tmpfs/notes_bing.mdwn b/hurd/translator/tmpfs/notes_bing.mdwn
new file mode 100644
index 00000000..fa3eeac2
--- /dev/null
+++ b/hurd/translator/tmpfs/notes_bing.mdwn
@@ -0,0 +1,101 @@
+[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+[[!tag open_issue_hurd]]
+
+1. to run tmpfs as a regular user, /servers/default-pager must be executable by
+ that user. by default it seems to be set to read/write.
+
+ $ sudo chmod ugo+x /servers/default-pager
+
+ Then I get this error:
+
+ tmpfs: /build/mbanck/hurd-20060825/build-tree/hurd/tmpfs/dir.c:62: diskfs_get_directs: Assertion `__builtin_offsetof (struct tmpfs_dirent, name) >= __builtin_offsetof (struct dirent, d_name)' failed.
+
+2. i rearranged the struct tmpfs_dirent in tmpfs.h to line up with the struct
+   dirent. now the assert at line 62 of dir.c passes.
+
+ struct tmpfs_dirent
+ {
+ struct tmpfs_dirent *next;
+ struct disknode *dn;
+ + char padding[3];
+ uint8_t namelen;
+ char name[0];
+ };
+
+ now ls works on an empty directory.
+ you can touch files, and run `ls' on them.
+ mkdir, rmdir works too.
+ fsysopts works
+ df works
+
+3. creating a symlink fails.
+
+ old patch to get symlinks working:
+
+ http://www.mail-archive.com/bug-hurd@gnu.org/msg11844.html
+ --- node.c.orig 2005-07-24 09:56:39.000000000 -0400
+ +++ node.c 2005-07-24 09:55:46.000000000 -0400
+ @@ -330,6 +330,7 @@
+ create_symlink_hook (struct node *np, const char *target)
+ {
+ assert (np->dn->u.lnk == 0);
+ + np->dn_stat.st_size = strlen (target);
+ if (np->dn_stat.st_size > 0)
+ {
+ const size_t size = np->dn_stat.st_size + 1;
+ @@ -337,6 +338,7 @@
+ if (np->dn->u.lnk == 0)
+ return ENOSPC;
+ memcpy (np->dn->u.lnk, target, size);
+ + np->dn->type = DT_LNK;
+ adjust_used (size);
+ recompute_blocks (np);
+ }
+ @@ -380,8 +382,6 @@
+ error_t
+ diskfs_truncate (struct node *np, off_t size)
+ {
+ - if (np->allocsize <= size)
+ - return 0;
+
+ if (np->dn->type == DT_LNK)
+ {
+ @@ -392,6 +392,9 @@
+ return 0;
+ }
+
+ + if (np->allocsize <= size)
+ + return 0;
+ +
+ assert (np->dn->type == DT_REG);
+
+ if (default_pager == MACH_PORT_NULL)
+
+ now symlinks work.
+
+4. can't write data to a file
+
+
+---
+
+miscellaneous notes:
+
+`diskfs_disk_name` could be `NULL`, but it is `"swap"`
+
+using `default_pager_object_set_size (np->dn->u.reg.memobj, size);` to truncate and grow.
+
+why are our blocks 512? shouldn't it be something else? or at least settable?
+or does [[libdiskfs]] demand this?
+
+`diskfs_get_filemap_pager_struct (struct node *np)` returns null.
+shouldn't it return `default_pager`?
diff --git a/hurd/translator/tmpfs/notes_various.mdwn b/hurd/translator/tmpfs/notes_various.mdwn
new file mode 100644
index 00000000..5e4e991c
--- /dev/null
+++ b/hurd/translator/tmpfs/notes_various.mdwn
@@ -0,0 +1,218 @@
+[[!meta copyright="Copyright © 2005, 2006, 2007, 2008, 2009 Free Software
+Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+[[!tag open_issue_hurd]]
+
+ <antrik> hde: what's the status on tmpfs?
+ <hde> Broke
+ <hde> k0ro traced the errors like the assert show above to a pager problem.
+ See the pager cannot handle request from multiple ports and tmpfs sends
+ request using two differ ports, so to fix it the pager needs to be hacked
+ to support multiple requests.
+ <hde> You can enable debugging in the pager by changing a line from dprintf
+ to ddprintf I can tell you how if you want.
+ <antrik> and changing tmpfs to use a single port isn't possible?...
+ <hde> antrik, I am not sure.
+ <hde> IIRC k0ro was saying it cannot be changed and I cannot recall his
+ reasons why.
+ <sdschulze> antrik: Doing it the quick&dirty way, I'd just use an N-ary
+ tree for representing the directory structure and mmap one new page (or
+ more) for each file.
+ <hde> sdschulze, What are you talking about?
+ <sdschulze> hde: about how I would implement tmpfs
+ <hde> O
+ <azeem> sdschulze: you don't need to reimplement it, just fix it :)
+ <sdschulze> azeem: Well, it seems a bit more difficult than I considered.
+ <sdschulze> I had assumed it was implemented the way I described.
+ <hde> O and the assert above gets triggered if you don't have a
+ default-pager setup on /servers/default-pager
+ <hde> the dir.c:62 assert that is.
+ <azeem> hde: you sure? I think I have one
+ <hde> I am almost sure.
+ <azeem> mbanck@beethoven:~$ showtrans /servers/default-pager
+ <azeem> /hurd/proxy-defpager
+ <azeem> isn't that enough?
+ <hde> It is suppose to be.
+ <hde> Try it as root
+ <hde> I was experiecing alot of bugs as a normal user, but according to
+ marcus it is suppose to work as root, but I was getting alot of hangs.
+ <azeem> hde: same issue, sudo doesn't work
+ <hde> sucky, well then there are alot of bugs. =)
+ <azeem> eh, no
+ <azeem> I still get the dir.c assert
+ <sdschulze> me too
+ <sdschulze> Without it, I already get an error message trying to set tmpfs
+ as an active translator.
+
+---
+
+ <hde> I think I found the colprit.
+ <hde> default_pager_object_set_size --> This is were tmpfs is hanging.
+ <hde> mmm Hangs on the message to the default-pager.
+
+---
+
+ <hde> Well it looks like tmpfs is sending a message to the default-pager,
+ the default-pager then receives the message and, checks the seqno. I
+ checked the mig gen code and noticed that the seqno is the reply port, it
+ this does not check out then the default pager is put into a what it
+ seems infinte condition_wait hoping to get the correct seqno.
+ <hde> Now I am figuring out how to fix it, and debugging some more.
+
+---
+
+ <marco_g> hde: Still working on tmpfs?
+ <hde> Yea
+ <marco_g> Did you fix a lot already?
+ <hde> No, just trying to narrow down the reason why we cannot write file
+ greater then 4.5K.
+ <marco_g> ahh
+ <marco_g> What did you figure out so far?
+ <hde> I used the quick marcus fix for the reading assert.
+ <marco_g> reading assert?
+ <hde> Yea you know ls asserted.
+ <marco_g> oh? :)
+ <hde> Because, the offsets changed in sturct dirent in libc.
+ <hde> They added 64 bit checks.
+ <hde> So marcus suggested a while ago on bug-hurd to just add some padding
+ arrays to the struct tmpfs_dirent.
+ <hde> And low and behold it works.
+ <marco_g> Oh, that fix.
+ <hde> Yup
+ <hde> marco_g, I have figured out that tmpfs sends a message to the
+ default-pager, the default-pager does receive the message, but then
+ checks the seqno(The reply port) and if it is not the same as the
+ default-pagers structure->seqno then she waits hoping to get the correct
+ one. Unfortantly it puts the pager into a infinite lock and never come
+ out of it.
+ <marco_g> hde: That sucks...
+ <marco_g> But at least you know what the problem is.
+ <hde> marco_g, Yea, now I am figuring out how to fix it.
+ <hde> Which requires more debugging lol.
+ <hde> There is also another bug, default_pager_object_set_size in
+ <hde> mach-defpager does never return when called and makes tmpfs hang. I
+ <hde> will have a closer look at this later this week.
+
+---
+
+ <hde> Cool, now that I have two pagers running, hopefully I will have less
+ system crashes.
+ <marcus> running more than one pager sounds like trouble to me, but maybe
+ hde means something different than I think
+ <hde> Well the other pager is only for tmpfs to use.
+ <hde> So I can debug the pager without messing with the entire system.
+ <hde> marcus, I am trying ti figure out why diskfs_object_set_size waits
+ forever. This way when the pager becomes locked forever I can turn it
+ off and restart it. When I was doing this with only one mach-defpager
+ running the system would crash.
+ <marcus> hde: how were you able to start two default pagers??
+ <hde> Well you most likely will not think my way of doing it was correct,
+ and I am also not sure if it is lol. I made my hacked version not stop
+ working if one is alreay started.
+
+---
+
+ <hde> See, the default-pager has a function called
+ default_pager_object_set_size this sets the size for a memory object,
+ well it checks the seqno for each object if it is wrong it goes into a
+ condition_wait, and waits for another thread to give it a correct seqno,
+ well this never happens.
+ <hde> Thus, you get a hung tmpfs and default-pager.
+ <hde> pager_memcpy (pager=0x0, memobj=33, offset=4096, other=0x20740,
+ size=0x129df54, prot=3) at pager-memcpy.c:43
+ <hde> bddebian, See the problem?
+ <bddebian> pager=0x0?
+ <hde> Yup
+ <hde> Now wtf is the deal, I must debug.
+ <hde> -- Function: struct pager * diskfs_get_filemap_pager_struct
+ <hde> (struct node *NP)
+ <hde> Return a `struct pager *' that refers to the pager returned by
+ <hde> diskfs_get_filemap for locked node NP, suitable for use as an
+ <hde> argument to `pager_memcpy'.
+ <hde> That is failing.
+ <hde> If it is not one thing it is another.
+ <bddebian> All of Mach fails ;-)
+ <hde> It is alot of work to make a test program that uses libdiskfs.
+
+---
+
+ <bing> to run tmpfs as a regular user, /servers/default-pager must be
+ executable by that user. by default it seems to be set to read/write.
+ <bing> $ sudo chmod ugo+x /servers/default-pager
+ <bing> you can see the O_EXEC in tmpfs.c
+ <bing> maybe this is just a debian packaging problem
+ <bing> it's probably a fix to native-install i'd guess
+
+---
+
+ <bing> tmpfs is failing on default_pager_object_create with -308, which
+ means server died
+ <bing> i'm running it as a regular user, so it gets it's pager from
+ /servers/default-pager
+ <bing> and showtrans /servers/default-pager shows /hurd/proxy-defpager
+ <bing> so i'm guessing that's the server that died
+
+---
+
+ <bing> this is about /hurd/tmpfs
+ <bing> a filesystem in memory
+ <bing> such that each file is it's own memory object
+ <andar> what does that mean exactly? it differs from a "ramdisk"?
+ <bing> instead of the whole fs being a memory object
+ <andar> it only allocates memory as needed?
+ <bing> each file is it's own
+ <bing> andar: yeah
+ <bing> it's not ext2 or anything
+ <andar> yea
+ <bing> it's tmpfs :-)
+ <bing> first off, echo "this" > that
+ <bing> fails
+ <bing> with a hang
+ <bing> on default_pager_object_create
+ <andar> so writing to the memory object fails
+ <bing> well, it's on the create
+ <andar> ah
+ <bing> and it returns -308
+ <bing> which is server died
+ <bing> in mig-speak
+ <bing> but if i run it as root
+ <bing> things behave differently
+ <bing> it gets passed the create
+ <bing> but then i don't know what
+ <bing> i want to make it work for the regular user
+ <bing> it doesn't work as root either, it hangs elsewhere
+ <andar> but it at least creates the memory object
+ <bing> that's the braindump
+ <bing> but it's great for symlinks!
+ <andar> do you know if it creates it?
+ <bing> i could do stowfs in it
+
+---
+
+ <antrik> bing: k0ro (I think) analized the tmpfs problem some two years ago
+ or so, remember?...
+ <antrik> it turns out that it broke due to some change in other stuff
+ (glibc I think)
+ <antrik> problem was something like getting RPCs to same port from two
+ different sources or so
+ <antrik> and the fix to that is non-trivial
+ <antrik> I don't remember in what situations it broke exactly, maybe when
+ writing larger files?
+ <bing> antrik: yeah i never understood the explanation
+ <bing> antrik: right now it doesn't write any files
+ <bing> the change in glibc was to struct dirent
+ <antrik> seems something more broke in the meantime :-(
+ <antrik> ah, right... but I the main problem was some other change
+ <antrik> (or maybe it never really worked, not sure anymore)
+
+---
+
+[[!GNU_Savannah_bug 26751]]
diff --git a/hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn b/hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn
new file mode 100644
index 00000000..ef041a23
--- /dev/null
+++ b/hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn
@@ -0,0 +1,73 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+\#hurd, freenode, 2010
+
+ <slpz> humm... why does tmpfs try to use the default pager? that's a bad idea, and probably will never work correctly...
+ * slpz is thinking about old issues
+ <slpz> tmpfs should create its own pagers, just like ext2fs, storeio...
+ <slpz> slopez@slp-hurd:~$ settrans -a tmp /hurd/tmpfs 10M
+ <slpz> slopez@slp-hurd:~$ echo "foo" > tmp/bar
+ <slpz> slopez@slp-hurd:~$ cat tmp/bar
+ <slpz> foo
+ <slpz> slopez@slp-hurd:~$
+ <slpz> :-)
+ <pochu> slpz: woo you fixed it?
+ <slpz> pochu: well, it's WIP, but reading/writing works...
+ <slpz> I've replaced the use of default pager for the standard pager creation mechanism
+ <antrik> slpz: err... how is it supposed to use swap space if not using the default pager?
+ <antrik> slpz: or do you mean that it should act as a proxy, just allocating anonymous memory (backed by the default pager) itself?
+ <youpi> antrik: the kernel uses the default pager if the application pager isn't responsive enough
+ <slpz> antrik: it will just create memory objects and provide zerofilled pages when requested by the kernel (after a page fault)
+ <antrik> youpi: that makes sense I guess... but how is that relevant to the question at hand?...
+ <slpz> antrik: memory objects will contain the data by themselves
+ <slpz> antrik: as youpi said, when memory is scarce, GNU Mach will start paging out data from memory objects to the default pager
+ <slpz> antrik: that's the way in which pages will get into swap space
+ <slpz> (if needed)
+ <youpi> the thing being that the tmpfs pager has a chance to select pages he doesn't care any more about
+ <antrik> slpz: well, the point is that instead of writing the pages to a backing store, tmpfs will just keep them in anonymous memory, and let the default pager write them out when there is pressure, right?
+ <antrik> youpi: no idea what you are talking about. apparently I still don't really understand this stuff :-(
+ <youpi> ah, but tmpfs doesn't have pages he doesn't care about, does it?
+ <slpz> antrik: yes, but the term "anonymous memory" could be a bit confusing.
+ <slpz> antrik: in GNU Mach, anonymous memory is backed by a memory object without a pager. In tmpfs, nodes will be allocated in memory objects, and the pager for those memory objects will be tmpfs itself
+ <antrik> slpz: hm... I thought anynymous memory is backed by memory objects created from the default pager?
+ <antrik> yes, I understand that tmpfs is supposed to be the pager for the objects it provides. they are obviously not anonymoust -- they have inodes in the tmpfs name space
+ <antrik> but my understanding so far was that when Mach returns pages to the pager, they end up in anonymous memory allocated to the pager process; and then this pager is responsible for writing them back to the actual backing store
+ <antrik> am I totally off there?...
+ <antrik> (i.e. in my understanding the returned pages do not reside in the actual memory object the pager provides, but in an anonymous memory object)
+ <slpz> antrik: you're right. The trick here is, when does Mach return the pages?
+ <slpz> antrik: if we set the attribute "can_persist" in a memory object, Mach will keep it until object cache is full or memory is scarce
+ <slpz> or we change the attributes so it can no longer persist, of course
+ <slpz> without a backing store, if Mach starts sending us pages to be written, we're in trouble
+ <slpz> so we must do something about it. One option, could be creating another pager and copying the contents between objects.
+ <antrik> another pager? not sure what you mean
+ <antrik> BTW, you didn't really say why we can't use the default pager for tmpfs objects :-)
+ <slpz> well, there're two problems when using the default pager as backing store for translators
+ <slpz> 1) Mach relies on it to do swapping tasks, so meddling with it is not a good idea
+ <slpz> 2) There're problems with seqnos when trying to work with the default pager from tasks other the kernel itself
+ <slpz> (probably, the latter could be fixed)
+ <slpz> antrik: pager's terminology is a bit confusing. One can also say creating another memory object (though the function in libpager is "pager_create")
+ <antrik> not sure why "meddling" with it would be a problem...
+ <antrik> and yeah, I was vaguely aware that there is some seqno problem with tmpfs... though so far I didn't really understand what it was about :-)
+ <antrik> makes sense now
+ <antrik> anyways, AIUI now you are trying to come up with a mechanism where the default pager is not used for tmpfs objects directly, but without making it inefficient?
+ <antrik> slpz: still don't understand what you mean by creating another memory object/pager...
+ <antrik> (and yeat, the terminology is pretty mixed up even in Mach itself)
+ <slpz> antrik: I meant creating another pager, in terms of calling again to libpager's pager_create
+ <antrik> slpz: well, I understand what "create another pager" means... I just don't understand what this other pager would be, when you would create it, and what for...
+ <slpz> antrik: oh, ok, sorry
+ <slpz> antrik: creating another pager it's just a trick to avoid losing information when Mach's objects cache is full, and it decides to purge one of our objects
+ <slpz> anyway, IMHO object caching mechanism is obsolete and should be replaced
+ <slpz> I'm writting a comment to bug #28730 which says something about this
+ <slpz> antrik: just one more thing :-)
+ <slpz> if you look at the code, for most time of their lives, anonymous memory objects don't have a pager
+ <slpz> not even the default one
+ <slpz> only the pageout thread, when the system is running really low on memory, gives them a reference to the default pager by calling vm_object_pager_create
+ <slpz> this is not really important, but worth noting ;-)
diff --git a/hurd/translator/unionfs.mdwn b/hurd/translator/unionfs.mdwn
new file mode 100644
index 00000000..d1e3868b
--- /dev/null
+++ b/hurd/translator/unionfs.mdwn
@@ -0,0 +1,155 @@
+[[!meta copyright="Copyright © 2008, 2009, 2010 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+# `unionfs`
+
+*Unionfs allows you to simply union one directory or translator into another one, so you see the files of both of them side by side.*
+
+Source repository: <http://git.savannah.gnu.org/cgit/hurd/unionfs.git/>
+
+Right now there are some problems with syncing, so please be aware
+that it might not work as expected.
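+
+A minimal invocation just lists the directories to union as arguments -- a
+sketch only: `dir1` and `dir2` are placeholders, and option handling may
+differ between versions:
+
+    $ settrans -ca union /hurd/unionfs dir1 dir2
+    $ ls union/    # files of dir1 and dir2, side by side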
+
+<a name="unionmount"></a>
+# `unionmount`
+
+... is a special mode of `unionfs`.
+
+## Project Idea
+
+When setting a translator on Hurd -- similar to mounting a file system on UNIX
+-- the new node(s) exported by the translator obscure the original node
+where the translator is set, and any nodes below it in the directory tree. The
+translator itself can access the underlying node (which is a very nice feature,
+as it allows translators to present the contents of the node in a different
+format); but it's no longer accessible from the "outside".
+
+Plan9 has a feature where a file system can be mounted in union mode: the new
+file system doesn't obscure the mount point in this case, but instead the
+contents are combined. (This feature has also been under discussion in Linux
+for a couple of years now, under the label "VFS-based union mounts".)
+
+This kind of union mount is generally useful, as it's sometimes more
+convenient than unioning existing filesystem locations with unionfs -- it's not
+necessary to mount a file system that is to be unioned at some external
+location first: just union-mount it directly at the target location.
+
+But union mounts also allow creating passive translator hierarchies: if there
+is a passive translator on a parent node, and further passive translators on
+child nodes, the union mount allows the child nodes, with their translator
+settings, to remain visible after the parent translator has started.
+
+This could be useful for device nodes, for example: let's say we have an
+ethernet multiplexer at /dev/veth. Now the virtual subnodes could all be
+directly under /dev, i.e. /dev/veth0, /dev/veth1 etc., and explicitly refer to
+the main /dev/veth node in the translator command line. It would be more
+elegant, however, to store the virtual nodes directly below the main multiplexer
+node -- /dev/veth/0, /dev/veth/1 etc.
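+
+As a rough sketch (the multiplexer's path and the layout of its virtual nodes
+are assumptions here, not taken from its documentation), this could use the
+`--mount` option described under "Short Documentation" below:
+
+    # Hypothetical sketch: union-mount the ethernet multiplexer at /dev/veth,
+    # so that nodes stored underneath it (e.g. /dev/veth/0, /dev/veth/1)
+    # stay visible once the multiplexer is running.
+    $ settrans -a /dev/veth unionfs --underlying --mount=/hurd/eth-multiplexer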
+
+There are two possible approaches to implementing union mounts in the
+Hurd. The first one is to let the various translators handle union mounts
+internally, i.e. let them present the underlying nodes to the clients in
+addition to the actual nodes they export themselves. This probably can be
+implemented as some kind of extension to the existing netfs and diskfs
+libraries.
+
+The other possible approach is less efficient and probably trickier, but
+also more generic: create a special unionmount translator, which
+serves as a kind of proxy: it sets the union-mounted translator on some
+internal node, and at the actual mount location presents a union of the
+nodes exported by this translator and the nodes from the underlying file
+system.
+
+The goal of this project is implementing union mounts using either of the
+approaches described above. (Though it might be useful initially to prototype
+both for comparison.) The ethernet multiplexer shall serve as an example use
+case -- any changes necessary to allow using it with the union mount
+functionality are also to be considered part of the task.
+
+[[Sergiu Ivanov|scolobb]] has been working on this as a [[Google Summer of Code
+2009 project|community/gsoc/2009]].
+
+## Implementation
+
+### Source
+
+Union mounts are currently implemented as two additional command-line
+options of the `unionfs` translator. This implementation resides in
+the `master-unionmount` branch of the unionfs git repository. To
+check out the code, do the following:
+
+ $ git clone git://git.sv.gnu.org/hurd/unionfs.git
+ $ cd unionfs
+ $ git checkout -b master-unionmount
+ $ git pull origin master-unionmount
+
+You can skip the checkout step if you don't mind that the
+`master-unionmount` branch gets merged into the `master` branch.
+
+### Short Documentation
+
+The `unionmount` project adds the options `--mount` and `--no-mount` to
+`unionfs` (short versions: `-t` and `-n`, respectively). Both
+options are used to implement union mounting, but the first option
+will create a *transparent* union mount, while the second option will
+create a *nontransparent* union mount.
+
+One can create a transparent union mount with the following command:
+
+ $ settrans -a <node> unionfs --underlying --mount=<translator>
+
+When running
+
+ $ fsysopts <node>
+
+one will see the information about the `<translator>`, not the
+`unionfs` translator. Although this might seem like the only natural way
+to do union mounts, one must keep in mind that such transparency
+makes it impossible to modify the unioned virtual
+filesystem exported by `unionfs` at run-time (via `fsysopts`).
+
+One can create a nontransparent union mount with the following command:
+
+ $ settrans -a <node> unionfs --underlying --no-mount=<translator>
+
+When running
+
+ $ fsysopts <node>
+
+one will see the information about the `unionfs` translator. Although
+this way allows modifying the contents of the unioned filesystem
+exported by `unionfs` at run-time, access to `<translator>` is
+blocked.
+
+The filesystem exported by the *mountee* (`<translator>`) is actually
+treated like a normal filesystem within `unionfs`, which means that
+one can assign priorities to the *mountee* to achieve the desired
+order of layering of the unioned directories. The following will make
+`unionfs` query the underlying filesystem first and then the
+*mountee*:
+
+ $ settrans -a <node> unionfs --priority=2 --underlying --priority=1 --mount=<translator>
+
+Note that the same functionality can also be achieved by assigning
+priority 1 to the underlying filesystem and keeping the priority of
+the *mountee* at 0.
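+
+A minimal sketch of that alternative (same placeholders as above, and assuming
+that `--priority` applies to the filesystems following it on the command line,
+as in the previous example):
+
+    $ settrans -a <node> unionfs --priority=1 --underlying --priority=0 --mount=<translator>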
+
+<a name="stowfs"></a>
+# `stowfs`
+
+... is a special mode of `unionfs`.
+
+# External Links
+
+ * [*Unioning file systems for Linux*](http://valerieaurora.org/union/)
+
+ * [FUSE page about
+ `unionfs`](http://sourceforge.net/apps/mediawiki/fuse/index.php?title=UnionFileSystems)
diff --git a/hurd/translator/unionmount.mdwn b/hurd/translator/unionmount.mdwn
new file mode 100644
index 00000000..7384afc7
--- /dev/null
+++ b/hurd/translator/unionmount.mdwn
@@ -0,0 +1,11 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+[[!meta redir=unionfs#unionmount]]
diff --git a/hurd/translator/wishlist_1.mdwn b/hurd/translator/wishlist_1.mdwn
new file mode 100644
index 00000000..36290883
--- /dev/null
+++ b/hurd/translator/wishlist_1.mdwn
@@ -0,0 +1,129 @@
+[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation,
+Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+* [[devfs]]
+
+* FUSE (<http://fuse.sourceforge.net/>) compatibility library: just modify FUSE apps a little and recompile them to work as translators :-)
+
+* File Finder (uses find, grep, a combination, or a custom command such as htdig or mp3 info)
+  * Files found will be available under one directory and can then be used like a normal directory
+  * useful to generate albums, share only the resulting files over the net, etc.
+  * The file finder can be scheduled, or can be connected over some IPC mechanism like D-Bus to the VFS system (if any) to keep a watch for new files.
+
+* GNOKII, BitPim and openobex as translators
+ * grep through your SMSs!
+ * share your addressbook!
+ * "Attach" that funny SMS/MMS to your email.
+ * "svn commit" your joke collection :-D
+
+* Real Language Translator [[br]]
+  * cat /usr/translator/de-en/usr/share/doc/something.txt should take /usr/share/doc/something.txt, submit it to Google's website and bring back the results.
+
+* Mozilla Bookmarks = del.icio.us
+  * Need more explanation? ;-)
+
+* <http://hnb.sf.net>
+  * having a directory structure for a file can allow one to "svn commit" the hnb document in a more "node-safe" manner, thus allowing multiple people to work on the same hnb document.
+ * This must be fairly easy as HNB can already export to XML and XMLfs already exists.
+
+* DavFS
+  * Just set up a WebDAV share as a directory. The implementation of the protocol is already available in Nautilus and Konqueror.
+
+* Compiled form of your project
+ * you have your project in /somedir/project with source in /somedir/project/src .. /somedir/project/bin should always have the compiled version.. is it possible?
+  * The source has to have a Makefile.
+  * creating /somedir/project/bin-somearch should automatically cross-compile
+ * Seems feasible for a small project.
+
+* Report generation framework - an idea to be hugged by app developers, not kernel developers.
+  * You have financial data in some spreadsheet-like format in the /yourFinance directory
+  * You add report templates to /yourFinance/repTemplates
+  * Once you save data in /yourFinance, the next cat /yourFinance/reports/areportname will give you an up-to-date report.
+  * This will be useful for any purpose, including serving by static-page web servers, sharing over samba/nfs/ftp etc.!
+  * The advantage is that any save to the spreadsheet will update the report -- not just saves made from one particular application.
+
+* SVN (Subversion suite)
+ * like [[cvsfs]]. /svndir/version/version-no should automatically have subversion
+ * I think it is nice to write a generalised version control system framework library which will help in writing version control translators and other tools easily.
+
+* Flexi-acls
+  * First of all - can this be done? A translator bound to a directory must be able to access the contents of the directory which would have been accessible in the absence of the translator.
+  * This will make it possible to write translators that implement custom "Access Control Lists". Just imagine having advanced ACLs even if the underlying file system is dumb! Imagine changing the type of ACLs implemented with just two commands - one to detach the previous translator and the next to attach a different ACL translator! The ACLs themselves may be stored in a different directory.
+
+* The translator manager!
+  * Some translators will need to be inserted automatically - e.g. hardware drivers.
+  * Each hardware translator will publish its capabilities.
+  * The "top" translator will query the capabilities of the different hardware and match capabilities against the "slave" translators. That is its only job.
+  * The control is then handed over to the slave translator.
+  * The ranking doesn't only look at who can handle the "most" capabilities of the hardware. If it finds that multiple translators can handle the same hardware, it will use other parameters to choose between them: maybe the size in memory? The revision date? Stability (inferred from the version number)? And, to go to the extreme, the profiling data of the driver ;-P
+  * Advantage: the best driver wins!
+
+* An example. Skip it if you understood the above :-):
+  * You have a driver that can handle VGA + SVGA + Super3d acceleration + particle graphics + works for an Nvidia card.
+  * You have a driver that can handle SVGA + VGA.
+  * You have a driver that can handle VGA.
+  * Case 1: Your card: a VGA card with some extra fonts.
+  * First the VGA driver will be queried: OK, it can handle the essential capability.
+  * Next the SVGA driver: it can handle it, but has an extra module: unnecessary weight.
+  * The Nvidia driver: it can handle it, but again unnecessary weight: ruled out.
+  * Winner: the VGA driver.
+  * Case 2: Your card: an SVGA card.
+  * First the VGA driver will be queried: OK, it can handle one essential capability.
+  * Next the SVGA driver: it can handle the essential and one extra capability, with no extra weight.
+  * The Nvidia driver: it can handle it, but unnecessary weight: ruled out.
+  * Winner: the SVGA driver.
+  * Case 3: You have a VGA card but no VGA driver; then the SVGA driver would win.
+
+* Sound Server
+ * /ahsa - stands for Advanced HURD sound architecture :-) Just a temporary name .. for fun.
+  * /ahsa/out - directory which will hold "plug"s where applications come and join the server .. see below.
+ * /ahsa/out/mixer - main mixer
+ * /ahsa/out/nextfree - the file when "cat"ed gives the number of the next free plug
+ * /ahsa/plugins/ - info about available plugins
+ * /ahsa/out/[1..n]/ - dynamically generated directories for applications to plug on..
+ * /ahsa/out/[1..n]/data this is where you should do a "cat somerawsoundfile>/ahsa/out/`cat /ahsa/nextfree`/data"
+ * /ahsa/out/[1..n]/plugins - the plugin stack .. volume is also a plugin..
+ * /ahsa/out/[1..n]/plugins/[1..m]/ - echo "plugin param1 param2 param3" > /ahsa/out/[1..n]/plugins/`cat /ahsa/out/[1..n]/plugins/nextfree`/add
+ * /ahsa/out/[1..n]/plugins/[1..m]/params/{param1.. paramn}
+ * /ahsa/out/[1..n]/data.out - can be catted to get data processed through the server
+ * /ahsa/in - similar to /ahsa/out .. with except for an extra file to choose input devices.
+  * /ahsa/devs/{1..n} - devices detected .. can be dynamic .. there are USB sound cards and MIDI devices.
+ * /ahsa/out/[1..n]/plugins/[1..m]/0/params/dev
+  * Don't get tempted by: /ahsa/out/[1..n]/params/{rate, channels, and other stuff}
+ * that goes into /ahsa/out/[1..n]/plugins/0/params if /ahsa/out/[1..n]/plugins/0/detected == "headerless audio"
+  * There are a lot more things I could continue about the "sound server" .. the ideas simply don't seem to run out..
+ * Some features/advantages
+ * set output's translator plugin as ordinary text -- have text to speech conversion done by sound server!
+ * Create and apply plugin presets by simply copying directories!
+ * Me getting dizzy thinking of the zillion more advantages.
+  * If you are really doing some ordinary output, all you need to do is "cat" data into the next free "plug" and everything, including the format of the data, will be autodetected and sent to the final sound "merger"
+ * Dizzy ...
+
+* /usr/share/menu !!!! extension for package management idea ..
+ * cat mymenuitem.menu >> /usr/share/menu/menu
+ * cat /usr/share/menu/debian/kde ... :-)
+
+* Spam/Malware Control
+ * /usr/antimalware/ - put your mail here.. it will automatically be scanned. when finished it will vanish from here ..
+ * /usr/antimalware/clean - ... and pop out from here
+ * /usr/antimalware/malware - or here.
+
+* NetDevice
+ * !JustImagine(tm)... settrans -ac /netdevices /hurd/netdevfs - [ host | net ]
+ * One can access device files remotely
+  * This could be achieved by allowing translators to talk to one another over a network
+  * This will need translators to catch and handle ioctls (if there is such a thing in the Hurd).
+  * The device server which will listen to requests from the translators can be run even on a Linux machine!!!
+  * !JustImagine(tm)... accessing the CD writer/webcam on that GNU/Linux machine on the network using cdrecord on your local Hurd machine!
+  * !JustImagine(tm)... running GNU/Hurd on a minimalistic GNU/Linux (but with all the drivers) through a specially modified and optimised Qemu. The device server runs on the host machine, and the client translators access it over the virtual network created by Qemu. You get most of the drivers for free!
+
+* Emacs File VFS
+  * I came to know from my Emacs-loving friend that there are lots of VFS handlers in Emacs. I was wondering if there could be a translator which could tap into these Emacs VFS handlers.
diff --git a/hurd/translator/wishlist_2.mdwn b/hurd/translator/wishlist_2.mdwn
new file mode 100644
index 00000000..a927db55
--- /dev/null
+++ b/hurd/translator/wishlist_2.mdwn
@@ -0,0 +1,191 @@
+## <a name="Introduction"> Introduction </a>
+
+The idea behind file system translators is a powerful concept which hasn't received much attention in the mainstream computing world. So here is a list of interesting translators I've been able to dream up. I'm sure there are many more ideas floating around out there, so add them to the list!
+
+The [ferris project](http://witme.sourceforge.net/libferris.web/features.html) has some great ideas and code in the area of userspace dynamic filesystems, as has the [FUSE project](http://fuse.sourceforge.net/).
+
+## <a name="Audio_cdfs"> Audio\_cdfs </a>
+
+A translator which produces a directory of \*.wav files when you have an audio CD in the drive.
+
+## <a name="Ogg"> Ogg </a>
+
+This translator could be a sub-directory of the Audio\_cdfs translator and it would translate the \*.wav files into Ogg Vorbis/MP3 format.
+
+## <a name="CDDB"> </a> CDDB
+
+Of course it would be a lot nicer if the above two translators didn't name their files something worthless like track001.ogg. So we would want a translator which would hook up with a database on the web and produce meaningful file names.
+
+## <a name="Crypto"> Crypto </a>
+
+A cryptographic/steganographic file system seems like a nice match for the concept of user-land file systems. I like the idea of something like `settrans -a /secure stegfs --mpeg file001.mpg`
+
+## <a name="Revision_control"> Revision control </a>
+
+All of the empty space on your drive is now being wasted. Why not have a revision control translator which tracks changes to your documents? See also [this guy](http://www.linuxjournal.com/article.php?sid=5976). And then you'd do something like `cd /time-machine/2003/sept/14/` to see what your system looked like on the 14th of September 2003.
+
+## <a name="CVSFS"> </a> CVSFS
+
+See [cvsFS for Linux](http://cvsfs.sourceforge.net/). This provides a package which presents the CVS contents as a mountable file system. It allows viewing the versioned files as if they were ordinary files on a disk. There is also the possibility to check files in/out for editing. A read-only version has been written by Stefan Siegl and is available at [Berlios](http://cvs.berlios.de/cgi-bin/viewcvs.cgi/cvsfs4hurd/cvsfs/).
+
+## <a name="tar_and_gzip"> tar and gzip </a>
+
+Rumor has it that they are on the way. Actually, a tar + gzip/bzip2 translator does exist (although it hasn't been used much...): see [the Hurdextras project](http://savannah.nongnu.org/projects/hurdextras/) on Savannah.
+
+## <a name="ROM"> </a> ROM
+
+How about a translator which makes it look like you can write to read-only media (like CDs), or change files you don't have permission to change. This translator would make it seem like you could copy files to places where you normally couldn't. Think about combining this translator with the ftp translator and the tar and gzip translators. (cd /ftp/gnu.org/gnome.tar.gz/writes\_allowed; make install). It could be that unionfs does this very thing.
+
+## <a name="Super_FIFO"> Super\_FIFO </a>
+
+It's like a named pipe which is smart enough to start a process every time something new tries to read from it. For example, let's say I have a script that reads in a JPEG image and spits out a smaller thumbnail \*.jpg to STDOUT. With a standard fifo (`mkfifo fifo`) this almost works (`script big.jpg > fifo`). But what if there are two processes trying to read the fifo at once? Ick. And of course the standard way only works once without rerunning the command. I'm not quite sure what the syntax should look like, but I'm sure someone out there has a great idea waiting to happen.
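+
+One conceivable syntax, with a translator name and option invented here purely
+for illustration (no such translator exists):
+
+    # Hypothetical: every read of thumb.jpg re-runs the command and serves its stdout.
+    $ settrans -ca thumb.jpg /hurd/runfs --command='script big.jpg'
+    $ cp thumb.jpg /tmp/thumb1.jpg
+    $ cp thumb.jpg /tmp/thumb2.jpg   # a second, independent read just works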
+
+## <a name="Perl"> Perl </a>
+
+Perl is a wonderful language for hacking together something useful in a short amount of time. No concept is complete without being able to use it in a perl one-liner. And that goes for Hurd translators too. Right?
+
+    #!/usr/bin/perl
+    use Hurd::translator;   # hypothetical glue module providing the hooks below
+
+    # file named "two" can produce an endless supply of twos, etc. (a la /dev/zero)
+    my $i = 0;
+    for my $filename (qw(zero one two three four))
+    {
+        my $data = $i;      # capture this iteration's value for the closure
+        $libtrivfsread_codehash{$filename} =
+            sub { my $num_bytes = shift; return chr($data) x $num_bytes; };
+        # that's a hash of references to closures
+        $i++;
+    }
+    translator_startup();
+
+A Perl translator has been started by [John Edwin Tobey](http://john-edwin-tobey.org/Hurd/) (pith).
+
+## <a name="Source_code"> Source code </a>
+
+Here's a crazy thought. How about a translator for source code. You have a C source file like `hello.c` which is your normal everyday file. But there's a translator sitting underneath, so when you `cd hello.c` you get a directory with files like `main()` which represent the subroutines in `hello.c`. And of course you should be able to edit/remove those and have it modify the original source.
+
+## <a name="Libraries"> Libraries </a>
+
+Here's an [idea](http://www.circlemud.org/~jelson/software/fusd/docs/node13.html) from the people making [userspace drivers in Linux](http://www.circlemud.org/~jelson/software/fusd/):
+
+* "One particularly interesting application of FUSD that we've found very useful is as a way to let regular user-space libraries export device file APIs. For example, imagine you had a library which factored large composite numbers. Typically, it might have a C interface--say, a function called `int *factorize(int bignum)`. With FUSD, it's possible to create a device file interface--say, a device called `/dev/factorize` to which clients can `write(2)` a big number, then `read(2)` back its factors.
+
+* This may sound strange, but device file APIs have at least three advantages over a typical library API. First, it becomes much more language independent--any language that can make system calls can access the factorization library. Second, the factorization code is running in a different address space; if it crashes, it won't crash or corrupt the caller. Third, and most interestingly, it is possible to use `select(2)` to wait for the factorization to complete. `select(2)` would make it easy for a client to factor a large number while remaining responsive to other events that might happen in the meantime. In other words, FUSD allows normal user-space libraries to integrate seamlessly with UNIX's existing, POSIX-standard event notification interface: `select(2)`."
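+
+As a rough sketch of the calling convention described in the quote above (the
+`/dev/factorize` device and its newline-terminated reply format are purely
+hypothetical):
+
+    /* Sketch: talk to a hypothetical /dev/factorize device using plain
+       write(2)/read(2), as in the FUSD example quoted above. */
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <unistd.h>
+
+    int
+    main (void)
+    {
+      int fd = open ("/dev/factorize", O_RDWR);
+      if (fd < 0)
+        {
+          perror ("open /dev/factorize");
+          return 1;
+        }
+
+      /* Send the number to factor, then block until the factors come back. */
+      const char *request = "600851475143\n";
+      if (write (fd, request, strlen (request)) < 0)
+        {
+          perror ("write");
+          return 1;
+        }
+
+      char reply[128];
+      ssize_t n = read (fd, reply, sizeof reply - 1);
+      if (n < 0)
+        {
+          perror ("read");
+          return 1;
+        }
+      reply[n] = '\0';
+      printf ("factors: %s", reply);
+
+      close (fd);
+      return 0;
+    }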
+
+## <a name="Mail"> Mail </a>
+
+Am I off my rocker, or does an IMAP/POP translator sound like a good idea? It would make your remote mail servers look like local ones. Or what about a translator that makes an mbox-format mail spool look like a directory. Can anyone think of a good use for an SMTP translator?
+
+*Definitely: Copy my email in there to send it.* -- [[ArneBab|community/weblogs/ArneBab]]
+
+## <a name="UUEncode"> </a> UUEncode
+
+How about a UUEncode translator for those places where you can only store ASCII. Combine this with an NNTP translator and store your data in someone's Usenet archive. Or, since (as far as I know) there are no size limitations on file names in the Hurd, why not have a filesystem translator whose underlying store is a file name. (Now ls becomes cat.)
+
+## <a name="Computation"> Computation </a>
+
+This is from the revenge of the command-line department. Make a directory translator whose contents are a result of the computation specified in the directory name. Here's an example...
+
+ $ settrans -a /comp /hurd/computationfs
+ $ cd "/comp/3+4"
+ $ ls -l
+ total 0
+ -rw-r--r-- 1 nobody users 0 Oct 16 11:41 7
+ $
+ $ cd "/comp/sqrt(2)"
+ $ ls -l
+ total 0
+ -rw-r--r-- 1 nobody users 0 Oct 16 11:42 1.4142135623731
+ $
+
+...etc. Now think about your favorite GUI HTML editor and using File-&gt;Open on the following directory name, ``"/comp/for i in $( find / -name *.html ); do ln -s $i `basename $i`;done"`` Which would produce a directory listing with soft links to all of the \*.html files on your system. You could have all of the comforts of the shell from within that little File-&gt;Open dialog box.
+
+## <a name="Other"> Other </a>
+
+Just found Wolfgang Jährling's translator [wishlist](http://www.8ung.at/shell/trans.html).
+
+## <a name="Bochs"> Bochs </a>
+
+A translator which works with [Bochs](http://bochs.sourceforge.net/) disk images would be nice.
+
+## <a name="Rollover"> Rollover </a>
+
+A translator that uses a circular buffer to store log files. The translated node only contains the last N (mega,kilo)bytes.
+
+## <a name="Birthday"> Birthday </a>
+
+A translator that provides an interface into the birthday program.
+
+You can cat your calendar, e.g. bd/calendar/today, bd/calendar/this-week or bd/calendar/this-month.
+
+And you could write new events into files located in bd/events/DATE/event-name.
+
+DATE is of the format that birthday expects: DD/MM/YYYY.
+
+The contents of the file are any or none of the following birthday options: ann (An anniversary), bd (A birthday), ev (Some other event), wN (Warn N days in advance of date), toDATE (Event lasts until this DATE), forDAYS (Event runs for DAYS).
+
+You can optionally just edit the bd/birthdays file if you want to edit the configuration file by hand. It might make sense to write changes from bd/birthdays into ~/.birthdays.
+
+ $ settrans -c bd /hurd/birthday -f ~/.birthdays
+ $ ls bd/
+ birthdays calendar events
+ $ find bd -print
+ bd
+ bd/calendar
+ bd/calendar/daily
+ bd/calendar/this-week
+ bd/calendar/this-month
+ bd/events
+ bd/birthdays
+ $
+
+## <a name="LVM"> </a> LVM
+
+A translator to access block devices from Linux's [Logical Volume Management](http://www.tldp.org/HOWTO/LVM-HOWTO/) would be a useful addition.
+
+ # settrans -cap /dev/VolumeGroup0 /hurd/lvm /dev/PhysicalVolume0 /dev/PhysicalVolume1 ...
+ # ls /dev/VolumeGroup0/
+ home
+ var
+ # settrans -cap /home /hurd/ext2fs /dev/VolumeGroup0/home
+ # settrans -cap /var /hurd/ext2fs /dev/VolumeGroup0/var
+
+Probably both [LVM2](http://sourceware.org/lvm2/) and the [Device-mapper](http://sourceware.org/dm/) need to be ported.
+
+## <a name="bridging_translator"> bridging translator </a>
+
+A [bridging](http://bridge.sourceforge.net/faq.html) translator could improve the Hurd's networking facilities.
+
+ # settrans -cap /dev/br0 /hurd/bridge -i eth0 -i eth1 ...
+ # settrans -cap /servers/socket/2 /hurd/pfinet -i /dev/br0 -a ... -g ... -m ...
+
+Perhaps Linux's bridging code and [utilities](http://bridge.sourceforge.net/) can be ported (or glued in), or code from one of the BSDs.
+
+## <a name="SSH_translator"> </a> SSH translator
+
+Presenting remote file systems through SSH, similar to what gnome-vfs does.
+
+## <a name="SMB_translator"> </a> SMB translator
+
+Presenting remote file systems through Samba, similar to what gnome-vfs does. Giuseppe Scrivano has worked on this and smbfs is available at [hurdextras](http://savannah.nongnu.org/cgi-bin/viewcvs/hurdextras/smbfs/).
+
+## <a name="Crontab_translator"> Crontab translator </a>
+
+Presenting a user's crontab in a filesystem where cron entries are files.
+
+## <a name="globlink"> globlink </a>
+
+Firmlink to a file according to a filename matching pattern. When a file goes away, the next file that is matched is automatically linked to.
+
+ $ settrans -ac libfoo.so /hurd/globlink '/lib/libfoo*'
+
+## <a name="alphabetfs"> alphabetfs </a>
+
+Organize a large group of files by their first letter. Present one subdirectory for each letter in the alphabet.
+
+## <a name="fsysoptsctl"> fsysoptsctl </a>
+
+Send an fsysopts command to a set of translators. When you have a directory full of translators and you want to send each of them the same runtime option, this translator can do it for you.
+
+ $ settrans -ac all /hurd/fsysoptsctl '/tmp/mystuff/*'
+ $ fsysopts all --update
diff --git a/hurd/translator/writing/example.mdwn b/hurd/translator/writing/example.mdwn
new file mode 100644
index 00000000..0a3be4df
--- /dev/null
+++ b/hurd/translator/writing/example.mdwn
@@ -0,0 +1,303 @@
+[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+## Data User-Server Translator Example
+
+The code examples were written by Anand Babu.
+
+We have a data.h header file, a data.defs file, data-user.c and data-server.c
+source files, and a Makefile.
+
+data.h:
+-------
+
+ #ifndef _data_user_
+ #define _data_user_
+
+ /* Module data */
+
+ #include <mach/kern_return.h>
+ #include <mach/port.h>
+ #include <mach/message.h>
+
+ #include <mach/std_types.h>
+ #include <mach/mach_types.h>
+ #include <device/device_types.h>
+ #include <device/net_status.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <sys/statfs.h>
+ #include <sys/resource.h>
+ #include <sys/utsname.h>
+ #include <hurd/hurd_types.h>
+
+ /* Routine data_set_value */
+ #ifdef mig_external
+ mig_external
+ #else
+ extern
+ #endif
+ kern_return_t S_data_set_value
+ #if defined(LINTLIBRARY)
+ (data_port, value)
+ mach_port_t data_port;
+ int value;
+ { return S_data_set_value(data_port, value); }
+ #else
+ (
+ mach_port_t data_port,
+ int value
+ );
+ #endif
+
+ /* Routine data_get_value */
+ #ifdef mig_external
+ mig_external
+ #else
+ extern
+ #endif
+ kern_return_t S_data_get_value
+ #if defined(LINTLIBRARY)
+ (data_port, value)
+ mach_port_t data_port;
+ int *value;
+ { return S_data_get_value(data_port, value); }
+ #else
+ (
+ mach_port_t data_port,
+ int *value
+ );
+ #endif
+
+ #endif /* not defined(_data_user_) */
+
+data.defs:
+----------
+
+ /* Definitions for data interface
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the GNU Hurd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+ subsystem data 45000;
+
+ #include <hurd/hurd_types.defs>
+
+ #ifdef STACK_IMPORTS
+ STACK_IMPORTS
+ #endif
+
+ /* intr-rpc.defs defines the INTR_INTERFACE macro to make the generated RPC
+ stubs send-interruptible, and to prefix them with `hurd_intr_rpc_'. */
+ INTR_INTERFACE
+
+ /* set integer value to data */
+ routine data_set_value (
+ data_port: mach_port_t;
+ value: int);
+
+ /* get integer value from data */
+ routine data_get_value (
+ data_port: mach_port_t;
+ out value: int);
+
+data-user.c:
+------------
+
+ #include <stdio.h>
+ #include <hurd.h>
+ #include <hurd/hurd_types.h>
+ #include "data.h"
+
+ #ifndef _GNU_SOURCE
+ #define _GNU_SOURCE
+ #endif
+
+    int
+    main(int argc, char *argv[])
+    {
+      int value=0;
+      mach_port_t data_server_port;
+
+      /* Get a send right to the translator listening on /tmp/trans. */
+      data_server_port = file_name_lookup ("/tmp/trans", 0, 0);
+      printf ("data_server_port [%u]\n", data_server_port);
+
+      /* Call the RPCs declared in data.defs through the MIG-generated stubs
+         (they carry the S_ prefix because of MIGCOMSFLAGS = -prefix S_). */
+      S_data_set_value (data_server_port, 99);
+      S_data_get_value (data_server_port, &value);
+      printf ("data->get_value: [%d]\n", value);
+
+      return 0;
+    }
+
+data-server.c:
+--------------
+
+ #ifndef _GNU_SOURCE
+ #define _GNU_SOURCE
+ #endif
+
+ #include <stdio.h>
+ #include <getopt.h>
+ #include <errno.h>
+ #include <sys/stat.h>
+ #include <error.h>
+
+ #include <hurd/ports.h>
+ #include <hurd/hurd_types.h>
+ #include <hurd/trivfs.h>
+
+ #include "data.h"
+
+ extern boolean_t S_data_server
+ (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
+
+ int trivfs_fstype = FSTYPE_MISC;
+ int trivfs_fsid = 0;
+ int trivfs_support_read = 0;
+ int trivfs_support_write = 0;
+ int trivfs_support_exec = 0;
+ int trivfs_allow_open = 0x00;
+ int trivfs_protid_nportclasses = 0;
+ int trivfs_cntl_nportclasses = 0;
+
+ int data_value;
+
+ int demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp)
+ {
+ return (S_data_server(inp,outp)||trivfs_demuxer(inp,outp));
+ }
+
+ void trivfs_modify_stat (struct trivfs_protid *cred, io_statbuf_t *st)
+ {
+ }
+ error_t trivfs_goaway (struct trivfs_control *fsys, int flags)
+ {
+ exit (0);
+ }
+
+ kern_return_t S_data_set_value (mach_port_t data_port, int value)
+ {
+ data_value = value;
+ return 0;
+ }
+
+ kern_return_t S_data_get_value (mach_port_t data_port, int *value)
+ {
+ *value = data_value;
+ return 0;
+ }
+
+ int
+ main(int argc, char *argv[])
+ {
+ int err;
+ mach_port_t bootstrap;
+ struct trivfs_control *fsys;
+
+ if (argc > 1)
+ {
+ fprintf(stderr, "Usage: settrans [opts] node %s\n", program_invocation_name);
+ exit (1);
+ }
+
+ task_get_bootstrap_port (mach_task_self (), &bootstrap);
+ if (bootstrap == MACH_PORT_NULL)
+ error(2, 0, "Must be started as a translator");
+
+ /* Reply to our parent */
+ err = trivfs_startup (bootstrap, 0, 0, 0, 0, 0,&fsys);
+ mach_port_deallocate (mach_task_self (), bootstrap);
+ if (err) {
+ return (0);
+ }
+
+ ports_manage_port_operations_one_thread (fsys->pi.bucket, demuxer, 0);
+
+ return 0;
+ }
+
+Makefile:
+---------
+
+ CC = gcc
+ MIG = mig
+ CFLAGS = -Wall -g -D_GNU_SOURCE
+ LDFLAGS = -lthreads -lports -ltrivfs -lfshelp -lshouldbeinlibc
+ INCLUDES = -I.
+ LCHDRS =
+ MIGCOMSFLAGS = -prefix S_
+ OBJS = $(SRCS:.c=.o)
+ TAGS = etags.emacs21
+
+ all: data-server data-user
+ tags:
+ $(TAGS) $(SRCS) $(LCHDRS)
+
+ stubs: data.defs
+ $(MIG) $(MIGCOMSFLAGS) -server dataServer.c -user dataUser.c $^
+ data-server: data-server.c dataServer.c
+ $(CC) $^ $(CFLAGS) $(INCLUDES) $(LDFLAGS) -o $@
+ data-user: data-user.c dataUser.c
+ $(CC) $^ $(CFLAGS) $(INCLUDES) -o $@
+ clean:
+ rm -f *.o data-server data-user
+
+ start: data-server data-user
+ settrans -ac /tmp/trans data-server
+ ps -x | grep data-server
+ end:
+ settrans -fg /tmp/trans
+
+Building
+--------
+
+Do
+
+ make stubs
+
+to create the dataUser.c and dataServer.c files generated by mig. Create the
+executables using:
+
+ make all
+
+Testing
+-------
+
+Start the data-server translator using:
+
+ settrans -ac /tmp/trans data-server
+
+You can check if it is running using
+
+ ps -x | grep data-server
+
+Run the data-user executable to get the resultant output.
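+
+The output should look roughly like the following (the port name is arbitrary
+and will differ from run to run):
+
+    $ ./data-user
+    data_server_port [25]
+    data->get_value: [99]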
+
+You can remove the translator using:
+
+ settrans -fg /tmp/trans
+
+To remove the built files use:
+
+ make clean
+
+Happy Hacking!
diff --git a/hurd/translator/xmlfs.mdwn b/hurd/translator/xmlfs.mdwn
new file mode 100644
index 00000000..769c43ce
--- /dev/null
+++ b/hurd/translator/xmlfs.mdwn
@@ -0,0 +1,11 @@
+[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled
+[[GNU Free Documentation License|/fdl]]."]]"""]]
+
+<http://www.nongnu.org/hurdextras/#xmlfs>