Diffstat (limited to 'hurd/translator')
47 files changed, 0 insertions, 4111 deletions
diff --git a/hurd/translator/auth.mdwn b/hurd/translator/auth.mdwn deleted file mode 100644 index d9e70ec2..00000000 --- a/hurd/translator/auth.mdwn +++ /dev/null @@ -1,13 +0,0 @@ -[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -[[*The_Authentication_Server*|documentation/auth]], the transcript of a talk -about the details of the authentication mechanisms in the Hurd by Wolfgang -Jährling. diff --git a/hurd/translator/cvsfs.mdwn b/hurd/translator/cvsfs.mdwn deleted file mode 100644 index f5f1a9e0..00000000 --- a/hurd/translator/cvsfs.mdwn +++ /dev/null @@ -1,52 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -## Setting up cvsfs on GNU/Hurd - A step by step process - -### Description of cvsfs - -cvsfs is a virtual ([[libnetfs]] based) filesystem allowing you to mount -remotely located CVS modules into your local filesystem. The version -controlled files will appear to you just like regular ones. If you just want to view one file (or a small set of files), you also save a lot of network bandwidth, since only those files will be downloaded; the usual alternative would be to check out the whole tree and delete it after use. - -## Step-by-step process for installing cvsfs - -Download and prepare the source files from the CVS repository and build them. - - $ cvs -z3 -d:pserver:anonymous@cvs.savannah.nongnu.org:/sources/hurdextras co cvsfs - $ cd cvsfs/ - $ autoreconf -i - $ ./configure - $ make - $ make install - -Set up the translator and start grazing. - - $ mkdir -p cvsfs_test - $ settrans -a cvsfs_test /hurd/cvsfs cvs.sourceforge.net /cvsroot/projectname modulename - -Example: mounting the cvsfs module from hurdextras on a local directory. - - $ mkdir cvs.d - $ settrans -ac cvs.d/cvsfs /hurd/cvsfs cvs.savannah.nongnu.org sources/hurdextras cvsfs - -Now change to that directory and start using ls, emacs, and whatever you feel -like. :-) - -Happy Hacking.
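If you later want to inspect or detach the translator again, the standard Hurd commands apply. A minimal sketch, assuming the `cvsfs_test` node from the example above:

    $ showtrans cvsfs_test     # print the passive translator record, if one was set
    $ fsysopts cvsfs_test      # query the running translator's options
    $ settrans -g cvsfs_test   # make the active translator go away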
- - -## References - - * <http://www.nongnu.org/hurdextras/> - * <http://cvs.sv.nongnu.org/viewcvs/*checkout*/cvsfs/README?root=hurdextras> diff --git a/hurd/translator/devfs.mdwn b/hurd/translator/devfs.mdwn deleted file mode 100644 index 8784e998..00000000 --- a/hurd/translator/devfs.mdwn +++ /dev/null @@ -1,39 +0,0 @@ -[[!meta copyright="Copyright © 2009, 2012 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -`devfs` is a translator sitting on `/dev` and providing what is to be provided -in there in a dynamic fashion -- as compared to static passive translator -settings as they're used now. - -`devfs` has not yet been written. - ---- - -If applicable, it has to be taken care that all code concerning the page-in -path is resident at all times. - ---- - -# IRC, freenode, #hurd, 2012-01-29 - - <pinotree> what would be an hurdish way to achieve something like the - various system (udev, devfs, devd, etc) for populating devices files - automatically according to the found system devices? - <pinotree> (not that i plan anything about that, just curious) - <youpi> it's not really a stupid question at all :) - <youpi> I guess translators in /dev - <youpi> such as a blockfs on /dev/block - <antrik> pinotree: in an ideal world (userspace drivers and all), the - device nodes will be exported by the drivers themselfs; and the drivers - will be launched by the bus respective bus driver - <antrik> an interesting aspect is what to do if we want a traditional flat - /dev directory with unique device names... probably need some - unionfs-like translator that collects the individual driver nodes in an - intelligent manner diff --git a/hurd/translator/discussion.mdwn b/hurd/translator/discussion.mdwn deleted file mode 100644 index e038ba84..00000000 --- a/hurd/translator/discussion.mdwn +++ /dev/null @@ -1,25 +0,0 @@ -[[!meta copyright="Copyright © 2011 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_documentation open_issue_hurd]] - -IRC, freenode, #hurd, 2011-08-25: - - < frhodes> how can I replace an existing running server with a new one - without rebooting? - < antrik> frhodes: depends. if other critical things depend on it, you - can't. there is no mechanism to serialize and pass on the open sessions - < antrik> in some situations, you can orphan the old translator while - starting a new one, so the previous clients will stay with the old one - while new one will get the new one - < antrik> obviously that only works for things that aren't exclusive by - nature - < antrik> in some cases, you might even be able simply to remove the old - translator... 
but obviously only for non-critical stuff :-) diff --git a/hurd/translator/emailfs.mdwn b/hurd/translator/emailfs.mdwn deleted file mode 100644 index 80e2b150..00000000 --- a/hurd/translator/emailfs.mdwn +++ /dev/null @@ -1,287 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -# How cool would it be if email worked like snail mail? - -## Let's see how snail mail works - -* You write the letter with a pen and paper -* You write the "To" address -* Post it -> Put it in a Post Box - -## How Email works - -* You have your email client (and there is the first limitation: you can't use your favourite editor) -* Fill in the destination email address -* Send it (maybe a send button or a keyboard shortcut) - -## What are the problems? - -If you want to use a word processor for sending email, it has to implement a feature like mail merge. Suppose it doesn't have such a feature? Then you copy and paste into your email client, and what if that doesn't support HTML? There are workarounds for all of this, but how cool would it be if you could instead do something like this: - -* You create a file, using any word processor or text editor. -* Right click and see the properties -* Set the "To" field -* Drag it to the Post Box icon on your panel next to Trash - -## How can we implement it? - -An SMTP translator which uses extended attributes. - -## Comments - -IRC logs of the discussion on #hurd - -<!-- That smileys are rendered the wrong way below is an ikiwiki bug. --> - - <manuel>j4v4m4n: isn't the HHG a good enough libtrivfs tutorial? - <manuel> the problem I have with mail, blog, ..., translators is that you want to save mails before sending them, so you can't edit directly into the translator - <j4v4m4n> manuel, may be we want a simpler one, all of us are beginners except AB - <manuel> they're not stream-based so unless you save it into memory and wait for "sync" to send mails, it doesn't seem well suited - <manuel> who's AB? - <j4v4m4n> manuel, create any file with your favourite editor and copy it to the directory where SMTP translator is sitting - <j4v4m4n> manuel, Anand Babu - <manuel> sure, but how is that better than sending it via the "mail" command then? - <manuel> except it's less hype, of course. - <j4v4m4n> manuel, http://savannah.gnu.org/users/ab - <j4v4m4n> manuel, it would be cool :-) - <manuel> still not convinced :) - * schlesix (n=thomas@xdsl-81-173-230-219.netcologne.de) has joined #hurd - <j4v4m4n> manuel, set up SMTP translator on the desktop and may be add it next to the Trash :-) - <j4v4m4n> manuel, have a nice postbox icon - <j4v4m4n> manuel, drag your files to it - <j4v4m4n> manuel, it would be closer to the real world and snail mail - <bvk> j4v4m4n: To whom do they go? - <manuel> bvk: the file must be preformatted, probably - <j4v4m4n> bvk, in snail mail you will write to address on top of the envelop, right?
- <manuel> j4v4m4n: yeah well, it could make sense in a desktop envronment - <j4v4m4n> bvk, here we can have it as the first line of the file - <manuel> not sure - <bvk> j4v4m4n: i never used snail :( - <j4v4m4n> manuel, that is what I have in mind - * j4v4m4n like snail mail that email - <manuel> bvk: you never sent a mail via snail mail?! :) - * j4v4m4n like snail mail more than email - <bvk> manuel: nope :( whats that btw? - <j4v4m4n> manuel, or why not have it as the file property itself?? - <bvk> manuel: you know its first time i hear it *snail mail* :(( - <j4v4m4n> bvk, the normal mails which a postman delivers :-) - <j4v4m4n> manuel, you create a file text, open document or whatver format - <manuel> j4v4m4n: I'm quite sure it'd make things more complicated in the end, using file properties, dragging files etc. - <j4v4m4n> manuel, righ click and see properties and set the to field - <bvk> Oh, these english words... :X - <j4v4m4n> manuel, we can use the xtended atributes - <j4v4m4n> manuel, which really showcase the power of hurd - <j4v4m4n> manuel, it becomes closer to the real world - <bvk> actually, is X working on hurd? - <j4v4m4n> bvk, well it used to work and the new Xorg release has som - <j4v4m4n> bvk, well it used to work and the new Xorg release has some broken packages - <j4v4m4n> bvk, if you use an old snapshot repository it will work (xorg 6.9) - * marco_g (n=marco@gnu/the-hurd/marco) has joined #hurd - <marco_g> hi - <j4v4m4n> marco_g, hi - * bvk watching MIT lecture video on 'structure and interpretation of computer programs' - <manuel> bvk: yeah, X was ported on GNU/Hurd five-six years ago or so - <j4v4m4n> manuel, see http://hurd.in/bin/view/Main/EmailFS - <j4v4m4n> manuel, add your comments, if you like - <manuel> j4v4m4n: how would you convert the .odt to a mail? - <j4v4m4n> manuel, attachment - <manuel> with an empty mail? - <manuel> that won't get through *most* spam filters :) - <j4v4m4n> manuel, or may be convert it to HTML - <manuel> well converting it to text or HTML would require a set of rules to convert from any format to text/HTML, like a2ps has (some sort of mailcap file) - <j4v4m4n> manuel, it can be flexible, a parameter to the translator as to what should be done with each formats - <manuel> and there's no convenient way to convert ODT to text AFAIK, you need to use ooffice with a batch program, which you need to provide too - <manuel> well that's really complex - <j4v4m4n> manuel, well how will you send me a CD by post? - <j4v4m4n> manuel, or say a bed? - <j4v4m4n> manuel, courier or parcel, right? so attachment is fine - <manuel> sure but you'll add a note saying "this is a bed from Praveen" - <ness> why not add a note to such a mail - <ness> you could even move multiple files simultaneously to the mail translator - <manuel> hm - <manuel> so how is the translator supposed to know that all the files I move are to be sent in a single mail and not on separate mails? - <manuel> and how'll you be able to add a note to such a mail? 
I mean, of course you can set it on the xattr but that's quite strange (the attachment is supposed to be an attr of the mail, not the other way) and not convenient at all - <manuel> I'm quite sure using a MUA is still easier - <ness> you could move a complete directory to the mail trans - <ness> (and the desktop icon can do this transparently) - <manuel> hmm so you have to create a directory, write a text file on it (with a special filename, I guess, since you could also have text files as attachments) and add the attachments to the directory - <manuel> and then drag & drop it - * manuel thinks things are getting more and more complicated :) - <ness> the special file name or attribute thing is right - <ness> but you not necisirily need to create a dir - <ness> s/necisirily/necessarily/ - <ness> you just drag 'n' drop multiple files to the icon - <manuel> and how is the translator supposed to know they are dragged at the same time and not one after the other? - <ness> I do not know if it is viable - * antrik (n=olaf@port-212-202-210-130.dynamic.qsc.de) has joined #hurd - <manuel> AFACS, dragging multiple files just make the desktop issue multiple rename() - <moritz> manuel: however the desktop handles that - it would be a rather easy thing to fix, i guess. - * schlesix has quit (Remote closed the connection) - <manuel> moritz: how is the desktop supposed to handle that? - <moritz> if this mail translator approach, is primarily to be used in desktop environments, one could implement the whole thing on the desktop environment layer, not with Hurd translators. - <moritz> manuel: i think it would be rather easy for the desktop to distinguish between actions like "ONE file is dragged" and "MULTIPLE files are dragged". - * schlesix (n=schlesix@xdsl-81-173-230-219.netcologne.de) has joined #hurd - <manuel> oh yeah, but then you loose the transparency, and there's no point in making it a translator. I think we agree on that :) - <moritz> i see rather little point in making it a translator anyway, since only god knows wether we have similar concept to translators in hurd-ng. - <manuel> yeah sure, but praveen wasn't planning it for HurdNG AIUI - <moritz> in that case it would probably be toy project. fine. - <moritz> i need to do some maths. see you. - <manuel> hmm well, you can't write anything else than toy projects, then - <ness> moritz: you shouldn't be too sure about success of ngHurg - <ness> it is an experiment - <antrik> sdschulze: ping - * antrik has quit (Remote closed the connection) - * antrik (n=olaf@port-212-202-210-130.dynamic.qsc.de) has joined #hurd - * bddebian (n=bdefrees@71.224.172.103) has joined #hurd - <j4v4m4n> manuel, This is a lot of input, let me sink these all first :-) - <bddebian> Heya folks - <schlesix> heya bddebian! - <j4v4m4n> it is ofcoures a "nice to have" feature. - <j4v4m4n> These are quite intersting inputs as well - <bddebian> Hi schlesix - <j4v4m4n> manual in the real wprld how will you send multiple things, say you want to send a CD and a bed - <j4v4m4n> manuel, you will package it (files) and then one parcel containing all these things (folder) - <manuel> j4v4m4n: well you want to make sending emails easier than sending real mails :-) - <j4v4m4n> manuel, it won't substitute MUAs - <j4v4m4n> manuel, we need it as the backend - <diocles> geekoe: You asked about GFS yesterday; well, glibc compiled. :) I've not done much more after that. 
- <antrik> regarding mail translator: take a look at Plan9, they have been doing it for years - <j4v4m4n> manuel, sorry not MUA I meant MTA - * syamajala (n=syamajal@c-24-147-61-120.hsd1.ma.comcast.net) has joined #hurd - <manuel> ah yes sure, but MUA will still be easier to use afaics - <j4v4m4n> manuel, people who are used to Windows say GNU/Linux is tough to use - <j4v4m4n> manuel, but when they start with GNOME or KDE they don't have any issues - <j4v4m4n> antrik, that is a great info I will look into it - <ness> j4v4m4n: they do it quite differently - <manuel> this doesn't answer to the basic question: how is it better than what we have now - <j4v4m4n> manuel, it is different, better is always debatable - <j4v4m4n> manuel, GNOME might work for but some doesn't use X at all - <j4v4m4n> manuel, whether it is good will be depending on the implemetation - <Jeroen> people who used to GNU/Linux say Windows is tough to use - <Jeroen> +are - <unlink> GNU/Linux is at least tougher to say - <Jeroen> no, people have less experience with GNU/Linux - <manuel> "to say", Jeroen - <j4v4m4n> manuel, better and easier are always relative - <j4v4m4n> manuel, there a lot of people still using mutt when you have thunderbird - <manuel> well because they have reasons to say mutt is easier than thunderbird - <Jeroen> the only thing is that you've to learn a few shortcuts when you want to use mutt, you can't just click around - <j4v4m4n> manuel, exactly - <j4v4m4n> manuel, consider this, you want to send a document across to someone - * Blackmore has quit (Read error: 104 (Connection reset by peer)) - * koollman has quit (Remote closed the connection) - <j4v4m4n> manuel, now you open a MUA add the attachment send it - * koollman (n=samson_t@gsv95-1-82-233-13-27.fbx.proxad.net) has joined #hurd - <j4v4m4n> manuel, if you just have to drag it to an icon, would that be easier? - * Casanova (n=prash@unaffiliated/casanova) has joined #hurd - <j4v4m4n> manuel, chmod +to:manuel@somehost doc.pdf ; cp doc.pdf postbox/ - <Jeroen> yeah - <Jeroen> chmod is for setting permissions... - <j4v4m4n> manuel, I am not sure how to set xattr - <manuel> well, setfattr - <Jeroen> well - <j4v4m4n> manuel, ok - <Jeroen> how do you type your subject? - <Jeroen> and there message itself? - <Jeroen> s/there/the/ - <Jeroen> how do you encrypt+sign it with pgp? - <manuel> j4v4m4n: well the problem is still the same you know. OK for to/subject : they'd be extended attributes. but how do you type the message itself?
- <antrik> I don't think using xattr for such stuff is a good idea - <antrik> after all, it's not a property of the document - <j4v4m4n> antrik, we can use it only on a particular directory on which our translator sit - <j4v4m4n> manuel, create a folder - <manuel> that'd mean mkdir message; ln -s doc.pdf message/; cat >message/message <<EOF; setfattr -n to -v mmenal@hurdfr.org; setfattr -n subject -v document; mv message postbox - <antrik> the reason why having translators for such stuff is that this way you have a generic service for sending mail, whether you use it through a special UI (MUA), directly with file commands, from a script, or from some other program that just sends mails as a side functionality - * mheath has quit (Connection reset by peer) - <j4v4m4n> manuel, that looks scary :-( - <manuel> not sure it's easier than "mutt; m; mmenal@hurdfr.org; document; >typing the message<; a; doc.pdf; y" - <antrik> manuel: it is easier in some situations - <antrik> (and again, I would not use xattr for such stuff) - <j4v4m4n> manuel, now how do you use mutt on GNOME? - <antrik> in fact, Plan9 explicitely does *not* have any xattr and stuff - <manuel> antrik: well xattr on the directory that represents the message is not illogical - * mheath (n=mheath@c-67-182-231-23.hsd1.co.comcast.net) has joined #hurd - <j4v4m4n> antrik, may be we can think of some other way if you don't wanna xattr - <manuel> j4v4m4n: well I just used the CLI because it's easier to describe, but try to explain the steps in a GUI and you'll see it's the same problem - <j4v4m4n> manuel, right click on desktop -> create folder -> drag the files to the folder -> set attributes to the folder-> drag it to postbox - <j4v4m4n> manuel, it is quite logical step - <manuel> sure, but how is it easier than click on the MUA icon -> create mail -> drag the files to the mail window ; type the attrbutes + contents ; click on send mail - <manuel> looks quite similar to me :-) - <j4v4m4n> manuel, or if you already have the folder just drag it - <kilobug> a POP or IMAP translator would be more useful IMHO (but well, I didn't read all the backlog, so I may be off topic) - <j4v4m4n> manuel, you don't have a MUA here :-) just files and folders - <kilobug> to read mails, I mean - <j4v4m4n> kilobug, that is even easier IMAP->mabox and then mboxfs - <manuel> j4v4m4n: well you have a MUA : that's the translator - <j4v4m4n> kilobug, mboxfs is already available - <j4v4m4n> kilobug, I think someone already wrote IMAP to mbox as well but couldn't find it - <kilobug> j4v4m4n: well, imapfs could work both way, writing changes on the imap server too ;) - <antrik> manuel: the difference is not how it is used; the difference is how it is implemented - <antrik> manuel: if you have a generic mail translator, you have most functionality already there with the file manager; all you need to add is some scripts for better comfort - <antrik> j4v4m4n: the way I would do it (and I guess Plan9 does, though I haven't checked) is either having a file in the mail directory with the headers, or a subdirectory with a single file for each header (probably the latter) - <j4v4m4n> antrik, that would make it too complicated IMHO, it would be close to how it is for snail mail - <antrik> j4v4m4n: I don't see how this would be more complicated than xattr - <j4v4m4n> manuel, you can write your own scripts to automate it for whatver way you want - * azor (n=azor@62-43-135-201.user.ono.com) has joined #hurd - <manuel> antrik: having the functionality in the filesystem is useful 
because programs can use this functionality without patching; the protocol to use the mail translator is so specific that you either need to be a real user (but then a MUA is a lot more useful) or have a patched program (but then you could use a lib) - <j4v4m4n> antrik, right clicking a file and setting u p to and subject seems easier that creating more files - <antrik> j4v4m4n: I don't think so. maybe it is in gnome, but than I'd consider it a shortcoming of gnome - <antrik> j4v4m4n: in shell "cat foo@example.invalid headers/to" is about as simple as you can get - <manuel> > - <antrik> erm... I mean echo - <antrik> and >, yes - <antrik> sorry - * yoj (n=jao@200.163.8.72) has joined #hurd - <manuel> "echo foo@example.invalid > headers/to" is not easier than "setfattr -n to -v foo@example.invalid" AFAICS. - <antrik> echo foo@example.invalid >headers/to - * yoj (n=jao@200.163.8.72) has left #hurd - * yoj (n=jao@200.163.8.72) has joined #hurd - <kilobug> manuel: it is a tiny bit if your "foo@example.invalid" is the output of a command, mycomplexcommand > headers/to is a bit easier than setfattr -n to -v `mycomplexcommand` - <kilobug> manuel: but it's the same for a value you type directly - <antrik> manuel: objectively it is not simpler, but it uses a generic mechanism users now well, instead of obscure xattr stuff - <antrik> know well - <j4v4m4n> antrik, ok we can think of that, but how about a desktop user? - abeaumont andar antrik arnau azeem azor - <j4v4m4n> antrik, he has to use more clicks and more head aches - <j4v4m4n> antrik, just right click and add to address and subject just you write on the envelop - <kilobug> j4v4m4n: that's good ! it makes him buy more medicine, drug corporations will sponsor you then ! - * kilobug runs away - * j4v4m4n chases kilobug - <j4v4m4n> kilobug, better way would be making outlook run on GNU :-) - <marco_g> Or GNU on outlook \o/ - * yoj (n=jao@200.163.8.72) has left #hurd - <kilobug> this channel is becoming insnae :p - <j4v4m4n> kilobug, or is it the members ?? :-) - <marco_g> I agree with kilobug, we should stop those weirdos here :-/ - * whr` (i=whr@acy238.neoplus.adsl.tpnet.pl) has joined #hurd - <antrik> hm... anyone have the marcus quote at hand? - <j4v4m4n> i got to go as well - <j4v4m4n> bye - <kilobug> bye j4v4m4n - -## Interesting?? - -Join the project -- Add yourself to the list below - -* [[Praveen A]] diff --git a/hurd/translator/examples.mdwn b/hurd/translator/examples.mdwn deleted file mode 100644 index ee766fbf..00000000 --- a/hurd/translator/examples.mdwn +++ /dev/null @@ -1,93 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -## Setting up translators - HowTo - -Translators can be got from hurd-extras <http://www.nongnu.org/hurdextras/> - - cvs -z3 -d:pserver:anonymous@cvs.savannah.nongnu.org:/sources/hurdextras co <modulename> - -* httpfs translator - -<!-- Prevent ikiwiki / Markdown rendering bug. 
--> - - $ settrans -a tmp/ /hurd/httpfs www.hurd-project.com/ - -or - - $ settrans -a tmp/ /hurd/httpfs www.hurd-project.com/ --proxy=<proxy> --port=<port> - $ cd tmp/ - $ ls -l - -* ftpfs translator - -<!-- Prevent ikiwiki / Markdown rendering bug. --> - - $ settrans -cgap ftp /hurd/hostmux /hurd/ftpfs / - $ cd ftp - ftp$ ls - ftp$ cd ftp.fr.debian.org - ftp/ftp.fr.debian.org $ ls - -* tarfs translator (needs uatime fix, 2010-08-25 → [git repo](http://github.com/giselher/tarfs)) - -You can use tarfs to mount (almost) any tar file (currently broken, 2010-08-25): - - $ settrans -ca a /hurd/tarfs -z myfile.tar.gz - $ settrans -ca b /hurd/tarfs -y myfile.tar.bz2 - $ settrans -ca c /hurd/tarfs myfile.tar - -You can even use it to create new tar files: - - $ settrans -ca new /hurd/tarfs -cz newfile.tar.gz - $ cp -r all my files new/ - $ syncfs new - -This is not as fast as `tar czvf newfile.tar.gz all my files`, but at least it's more original. ;) - -* cvsfs translator - -<!-- Prevent ikiwiki / Markdown rendering bug. --> - - $ settrans -ac cvsfs_testing /hurd/cvsfs cvs.savannah.nongnu.org /sources/hurdextras - $ cd cvsfs_testing - -* pfinet translator -- configuring your network interface - -<!-- Prevent ikiwiki / Markdown rendering bug. --> - - $ settrans -fgca /servers/socket/2 /hurd/pfinet -i <interface> -a <ip address> -m <subnet mask> -g <gateway ip> - -* Console translator -- setting up virtual consoles - -<!-- Prevent ikiwiki / Markdown rendering bug. --> - - $ console -d vga -d pc_mouse -d pc_kbd -d generic_speaker /dev/vcs - -* iso9660fs translator -- 'mounting' your cdrom - -<!-- Prevent ikiwiki / Markdown rendering bug. --> - - $ settrans -ac /cdrom /hurd/iso9660fs /dev/<cdrom device file> - -* ext2fs translator -- 'mounting' an ext2fs partition - -<!-- Prevent ikiwiki / Markdown rendering bug. --> - - $ settrans -ac /linux /hurd/ext2fs /dev/<partition device file> - -* unionfs translator - -To join "foo/" "bar/" and "baz/" in the directory "quux/", just do: - - $ settrans -capfg quux/ /hurd/unionfs foo/ bar/ baz/ - -If you want to join even quux/ contents in the union itself, add -u as a translator argument. -You can add filesystems at run-time with the fsysopts command. diff --git a/hurd/translator/exec.mdwn b/hurd/translator/exec.mdwn deleted file mode 100644 index d5b6bfbc..00000000 --- a/hurd/translator/exec.mdwn +++ /dev/null @@ -1,12 +0,0 @@ -[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -The *exec* server, listening on `/servers/exec`, is responsible for -preparing the execution of processes. 
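On a running system you can inspect that node directly. A minimal sketch; whether a passive translator record is present there varies between setups, since the exec server is normally started at boot time:

    $ ls -l /servers/exec      # the node the exec server listens on
    $ showtrans /servers/exec  # print its passive translator record, if any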
diff --git a/hurd/translator/ext2fs.mdwn b/hurd/translator/ext2fs.mdwn deleted file mode 100644 index fff2e74b..00000000 --- a/hurd/translator/ext2fs.mdwn +++ /dev/null @@ -1,45 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008, 2010, 2011 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -# Implementation - - * [[filetype]] option - - -## Large Stores - -The `ext2fs` translator from the upstream Hurd code base can only handle file -systems with sizes of less than roughly 2 GiB. - -[[!tag open_issue_hurd]] - - -### Ognyan's Work - - * Ognyan Kulev, [[*Supporting Large ext2 File Systems in the - Hurd*|ogi-fosdem2005.mgp]], 2005, at FOSDEM - - * Ognyan Kulev, [[large_stores]] - - * <http://kerneltrap.org/node/4429> - -Ognyan's patch lifts this limitation (and is being used in the -[[Debian_GNU/Hurd_distribution|running/debian]]), but it introduces another -incompatibility: `ext2fs` then only supports block sizes of 4096 bytes. -Smaller block sizes are commonly automatically selected by `mke2fs` when using -small backend stores, like floppy devices. - - -# Documentation - - * <http://e2fsprogs.sourceforge.net/ext2.html> - - * <http://www.nongnu.org/ext2-doc/> diff --git a/hurd/translator/ext2fs/filetype.mdwn b/hurd/translator/ext2fs/filetype.mdwn deleted file mode 100644 index 5d85bac9..00000000 --- a/hurd/translator/ext2fs/filetype.mdwn +++ /dev/null @@ -1,33 +0,0 @@ -[[!meta copyright="Copyright © 2011 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -The *ext2fs* translator doesn't support the ext2 format's *filetype* option. - -According to *mke2fs(8)*: - -> **filetype**: Store file type information in directory entries. - -By setting directory listings' informational `d_type` field (`readdir`, etc.), -this may avoid the need for subsequent `stat` calls. - -Not all file systems can support this option. - -In `[hurd]/ext2fs/dir.c` the `EXT2_FEATURE_INCOMPAT_FILETYPE` is generally -masked out (is not even considered) when adding a node to a directory in -`diskfs_direnter_hard` and when reading in `diskfs_get_directs`. The Hurd's -ext2fs unconditionally sets this field to 0 (`EXT2_FT_UNKNOWN`). - - -# `e2fsck` - -Running `e2fsck` on a file system with the *filetype* option, will correct the -*filetype* for a lot of files (all `EXT2_FT_UNKNOWN`?) to either 1 (regular -file, `EXT2_FT_REG_FILE`), or 2 (directory, `EXT2_FT_DIR`), and likely others. -The Hurd's ext2fs will again ignore these fields, of course. 
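Since the Hurd's ext2fs ignores and zeroes these fields anyway, one possible workaround when a file system is shared with Linux is not to use the feature at all. A sketch using the standard e2fsprogs tools, with `/dev/hd0s1` only as a stand-in device name:

    $ dumpe2fs -h /dev/hd0s1 | grep -i features   # check whether "filetype" is enabled
    $ tune2fs -O ^filetype /dev/hd0s1             # clear the feature flag
    $ e2fsck -f /dev/hd0s1                        # let e2fsck clean up the directory entries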
diff --git a/hurd/translator/ext2fs/large_stores.txt b/hurd/translator/ext2fs/large_stores.txt deleted file mode 100644 index 6e7ffc6f..00000000 --- a/hurd/translator/ext2fs/large_stores.txt +++ /dev/null @@ -1,520 +0,0 @@ -[[!meta copyright="Copyright © 2005, 2010 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -This is -*- mode: outline -*- - -* Introduction - -Here is a try to describe the ext2fs patch for the Hurd. This patch -allows using partitions/stores larger that approximately 1.5G by not -memory mapping the whole store to address space. - -As a guideline, the changelog of RC1 (Release Candidate 1) is -followed, so I hope nothing is missed. During writing of this text, -some questions arised and they are marked with XXX. An effort will be -made to fix all these for RC2. - - Ognyan Kulev <ogi@fmi.uni-sofia.bg> - -* The block layer and its purpose - -The basic unit of ext2 filesystem is "block". All filesystem -operation work on blocks which are read, and sometimes modified and -written back. Possible block sizes are 1K, 2K and 4K, but current -implementation works reliably only on 4K blocks (= page size of i386). - -So the two basic operations on blocks are "reading" block and -"writing" block. - -* Current implementation - -** Reading - -Currently, the whole store is memory mapped into address space of -ext2fs process. The is called "disk image", although "store image" -would be more accurate. The address of the start of the disk image is -stored in pager.c:disk_image. So "reading" block is easy: just -calculate byte offset of block and add it to disk_image. The resulting -address points to the start of the desired block. - -The macro ext2fs.h:bptr has exactly this purpose: given block number, -it returns pointer to block. Sometimes we have pointer somewhere in -the block, and we want the block number. This is calculated by -ext2fs.h:bptr_block. - -There is another set of macros that use byte offsets instead of block -numbers. These are boffs_ptr (store offset -> memory pointer) and -bptr_offs (memory pointer -> store offset). - -Converting between store offset and block number is easy with macros -boffs (block -> offset) and boffs_block (offset -> block). Other -useful macros are trunc_block and round_block. - -** Writing - -Modifying block and saving it is not that straight-forward as -reading. For writing, you need to use "pokel" ("poked elements"). -Pokel interface is in ext2fs.h. Implementation is in pokel.c. - -The problem is that generally multiple blocks are modified and we want -all these changes to hit disk at relatively same time. So we can't -just change block and leave decision when it's going to be written to -the microkernel. - -So there is a pokel for each set of changes and each change should be -reported to the pokel by calling pokel_add. When this set of changes -is completed, pokel_sync of pokel_flush is called. (The latter is -used to ignore changes.) - -In practice, there is one indir_pokel for each ext2fs.h:disknode, -which is used for indirect blocks of ext2fs. 
The only other pokel -used is ext2fs.h:global_pokel, where all other changes to metadata are -registered. - -* Proposed implementation - -First one must realize that the idea of mapping the whole store is to -be thrown away. So only parts of the store should be mapped. These -currently mapped parts of store are collectively called "cache". - -In the proposed implementation, the cache has fixed size of -ext2fs.h:DISK_CACHE_BLOCKS. In RC1, it's 100, but this is only to -easily catch bugs. In practice, it can be, for example, 512M, or -(512*1024/4) blocks of 4K. pager.c:disk_cache_size and -pager.c:disk_cache_blocks are additional variables about that -information. - -The cached blocks are mapped in ext2fs.h:disk_cache and span -disk_cache_size bytes (= disk_cache_blocks blocks). As in the -original implementation, this part of address space is handled by -custom pager. - -** Data structures - -Blocks in cache aren't consecutive, so we need data structure to hold -which part of address space represents what block. This is the -purpose of pager.c:disk_cache_info. Index in this array is "cached -block index". But this array doesn't help in finding if specific -block is mapped, and where. This is the purpose of the -pager.c:disk_cache_bptr ihash which finds cached block index from -given block number. Both data structures are guarded by -pager.c:disk_cache_lock. - -** Public interface - -"Public" interface to the cache are functions disk_cache_block_ref, -disk_cache_block_ref_ptr, disk_cache_block_deref, -disk_cache_block_is_ref. disk_cache_block_ref takes block number and -return pointer to block content. Reference count of this cached block -is incremented. After finishing work with block, -disk_cache_block_deref should be called. - -In converting original ext2fs code to use this functions, usually call -to bptr is turned into call to disk_cache_block_ref. In addition, -after pointer to block content is not used anymore, -disk_cache_block_deref is called. This simple scheme is only for -reading from block. For modifying block, see about pokels below. - -disk_cache_block_ref_ptr just increments reference count of specified -block. It's used when we give pointer to block content to somebody -else that will dereference it (e.g. pokel) and we want to continue to -use this content. - -disk_cache_block_is_ref checks if specified block has reference count -greater than zero. It's used in assert:s. - -*** bptr* and boffs* macros - -These macros continue to work as before, but they don't deal with -reference counting and this should be taken into consideration. In -addition, bptr_index returns cached block index from given pointer to -block content. (This function is used internally.) - -*** Pokels - -When pokel_add is called with pointer to block content, this -"consumes" reference of block. It's not consumed (decremented by 1) -immediately, but when pokel_sync or pokel_flush is called. (Reference -is consumed immediately if the block is already in the pokel. The -important thing is that you always lose one reference of the block.) - -So we have the following code when we read from block: - - char *bh = disk_cache_block_ref (block); - ... - disk_cache_block_deref (bh); - -And the following code when we modify block: - - char *bh = disk_cache_block_ref (block); - ... - pokel_add (pokel, bh, block_size); - -**** Indirect calls to pokel_add - -Some functions indirectly call pokel_add, so this should be taken into -consideration. 
These are: - - * record_global_poke - * record_indir_poke - -So these functions should be treated in the same scheme as pokel_add. -For example: - - char *bh = disk_cache_block_ref (block); - ... - record_indir_poke (node, bh); - -**** Modifying SBLOCK in diskfs_set_hypermetadata - -SBLOCK is global variable that points to superblock content. There is -one reference count for superblock, so before we call -record_global_poke (which consumes reference), -disk_cache_block_ref_ptr is called. - -**** Modifying GDP - -When group descriptor is wanted, usuall group_desc is called and -result is stored in local variable GDP. After modifying GDP, -record_global_poke is called. But because record_global_poke is used, -we need call to disk_cache_block_ref_ptr: - - gdp = group_desc (i); - ... - disk_cache_block_ref_ptr (gdp); - record_global_poke (gdp); - -*** More complex use of pointer to block content - -In ext2_new_block and ext2_alloc_inode functions, we have local -pointer variable BH that sometimes points to block content and -sometimes points to nothing. In order to reduce possible errors, when -BH points to nothing it's always 0. In some points (goto labels), -there is assertion if BH is what's expected (pointer to nothing or -pointer to something). - -*** dino - -dino function return pointer to struct ext2_inode for given ino_t. -This uses reference, so corresponding disk_cache_block_deref should be -called after finishing work with ext2_inode. For convenience, dino is -renamed to dino_ref, and dino_deref just calls disk_cache_block_deref. - - struct ext2_inode *di = dino_ref (np->cache_id); - ... - dino_deref (di); - -Or - - struct ext2_inode *di = dino_ref (np->cache_id); - ... - sync_global_ptr (di, 1); - dino_deref (di); - -Or - - struct ext2_inode *di = dino_ref (np->cache_id); - ... - record_global_poke (di); - -* Internals of the proposed implementation - -As said earlier, instead of mapping the whole store of filesystem to -address space, only part of it is mapped. This part is called "cache" -or "disk cache" (although "store cache" would be more appropriate). -Currently, the cache is contiguous area in address space that starts -at disk_cache. Its size is disk_cache_size which is disk_cache_blocks -number of blocks of size block_size. - -Mapped blocks in disk cache are not fixed -- each block in the cache -can be replaced at any time with another block. So we need to know -which blocks are cached currently and where. Information about each -cached block is stored in disk_cache_info[]. Index is from 0 to -disk_cache_blocks-1. In this information the block number is stored -(among some other things, discussed later). The reverse direction, -getting the index of cached block from block number, is achieved by -using disk_cache_bptr ihash. Both these data structures are guarded -by disk_cache_lock. - -** Requesting a block - -When ext2 code requests block, it calls disk_cache_block_ref. First, -this block is search with disk_cache_bptr. If its there, the -reference count is incremented and pointer to block content is -returned. In this case, there is a call to disk_cache_wait_remapping, -which is explained a bit later. - -It's more interesting when block is not found in disk_cache_bptr. In -this case, disk_cache_map is called. Again, disk_cache_bptr is -consulted, because in the meantime another could already have mapped -this block. If this is the case, the code is essentially the same as -those in disk_cache_block_ref. 
- -When it's assured that block is not in the cache, we have no choice -but throw away an already mapped/cached block and put our block in its -place. Such block has to meet the following conditions: - -- Its reference count being 0 -- Not in the core -- Not being remapped (explained later) -- Not being forbidden to be remapped ("fixed", explained later) - -The last three conditions are actually flags in disk_cache_info: -DC_INCORE, DC_REMAPPING and DC_FIXED. DC_DONT_REUSE collectively -gives the condition in which block is not suitable for -reusing/remapping. - -Searching suitable place in cache is linear. As an optimisation, this -search doesn't start from the beginning, but starts from where last -time it has ended. This last index is stored in disk_cache_hint. So -new candidate blocks for replacement are searched "circular". - -If suitable place is found, the old mapping is removed, and the new -mapping is initialized. But we are still not ready to return pointer -to block content, because this content is not available yet. We mark -the block as DC_REMAPPING, which makes disk_cache_block_ref for that -block in other threads to wait until page is completely remapped. - -In both cases, when we have found place and when suitable place is not -found, disk_cache_hint is updated so that next disk_cache_map -continues searching from where we ended. - -When not suitable place is found, we have to use force. First all -pages in disk cache are touched. This is workaround because of some -bug in GNU Mach. The patch relies on "precious page" features of -Mach. Marking a page as precious instructs Mach to always inform us -about evicting this page. If page is modified, it seems that we are -always informed. But if page is unmodified and page is evicted, -sometimes Mach forgets to tell us. It's true that with large disk -cache, e.g. 512M, this potentially will re-read the whole cache from -disk. But if we reach this point, the microkernel is telling us that -all is already read :-) - -This is preparation for following calls to pager_return_some. This -libpager function is called only on cached blocks that has reference -count of 0. These are the potential candidates for replacement -- -there is no sense in calling pager_return_some when reference count is -1 or more. One final case is when there is no cached block that has -reference count of 0. This is bad and we can't do anything about it. -In this case, we just wait one second hoping that some other thread -will drop reference count of block to 0. (XXX Currently (in RC1) -sleep(1) is always executed. It should be executed only when disk -cache is starving. There is some rationale behind calling sleep(1) even when -disk cache is not starving. Although pager_return_some(,,,1) -guarantees that upon return of this function the page is returned, I'm -not sure that it's guaranteed that pager_notify_pageout is called. -This is because pager_return_some and -libpager/data-return.c:_pager_do_write_request are executed in -different threads and pager_return_some is confirmed before calling -pager_notify_pageout. This issue is open.) - -So, after forcibly evicting all pages (blocks) that can potentially be -reused, disk_cache_map is called again. - -In the case when suitable place is found and all data structures -(disk_cache_info and disk_cache_bptr) are changed accordingly, -pager_return_some(,,,1) is called and we wait for pager_read_page to -clear DC_REMAPPING. 
The purpose of this flag (DC_REMAPPING) is solely -this: to forbid any use of this block until we are absolutely sure -that this page contains exactly the wanted block. If NDEBUG is not -defined (so we include debug code), flags of the blocks are checked if -DC_REMAPPING is really cleared. - -Is DC_REMAPPING really needed? Is there possibility that between last -"mutex_unlock (&disk_cache_lock)" and "return bptr" something could go -wrong? Actually, disk cache just follows protocol set by -pager_notify_pageout: that between pager_return_some and changing -internal structures for the remapping no thread may touch the page. -This is achieved by marking the page as DC_REMAPPING. For -convenience, function disk_cache_wait_remapping is defined which waits -for cached block while it's marked as DC_REMAPPING. - -XXX XXX: Actually, the sequence used in RC1 is: remap block and -pager_return_some. The latter seems redundant, as only blocks that -are evicted are candidates for remapping. I'll try to fix that for -RC2. - -** Modifying blocks and pokels - -After block is modified, it should be registered with pokel_add to -some pokel. Pokel contains list of ranges of cached blocks. All this -blocks should have reference count at least 1. In pokel_flush and -pokel_sync, this reference is consumed. - -So in pokel_add if added blocks are already in the pokel, their -references are consumed, because only 1 reference is consumed in -pokel_{sync,flush}. It's checked if pokel is for disk_cache, because -pokels are used in file access too, where disk cache layer is not -used. - -pokel_{flush,sync} both use _pokel_exec, so this is the place where -block references are consumed. (XXX: In RC1, they are consumed -always, but it's better to check if these pages are in disk_cache. -Although calling disk_cache_block_deref on non-disk_cache page does no -harm.) - -*** Indirect use of pokel_add - -record_global_poke and record_indir_poke use indirectly pokel_add. -These functions are slightly changed to use public interface of -disk_cache. Only new precondition is added for them: caller should -supply "reference" that will be consumed later by pokel_{flush,sync}. - -*** Modifying block without using pokels - -sync_global_ptr synchronizes given block immediately. No reference is -consumed. (XXX: This should be changed in RC2 to consuming reference. -This will make the function similar in use to -record_{global,indir}_poke and will make the code more nice-looking.) - -** Initialization - -*** The superblock - -To create disk cache, we need the block size of the filesystem. This -information is in superblock, so we need to read superblock without -using disk cache. For this purpose get_hypermetadata is changed to -read the superblock with store_read instead of old bptr. New function -map_hypermetadata is created that sets sblock global variable to point -to the already mapped superblock. So to get behavior of old -get_hypermetadata, first new get_hypermetadata should be called, and -then map_hypermetadata. - -In ext2fs.c:main, instead of calling get_hypermetadata, -map_hypermetadata is called. The call to get_hypermetadata is in -pager.c:create_disk_pager. - -In ext2fs.c:diskfs_reload_global_state, along with get_hypermetada, -map_hypermetadata is called. - -*** disk_cache - -Disk cache data structures are initialized in -pager.c:create_disk_pager called from ext2fs.c:main. Disk pager is -still initialized with diskfs_start_disk_pager, but due to block_size -variable we call get_hypermetadata. 
Basic parameters of disk cache -like disk_cache_blocks and disk_cache_size are initialized here. The -rest of the initialization process is delegated to disk_cache_init. - -disk_cache_init initializes the rest of disk cache data structures: -disk_cache_lock, disk_cache_remapping, disk_cache_bptr, -disk_cache_info and disk_cache_hint. After that superblock and group -descriptors are mapped into the cached and are marked as DC_FIXED. -This forbids reusing those blocks, because Hurd's ext2 code relies on -these blocks being mapped into fixed location in address space. - -** Pager callbacks - -disk_pager_read_page and disk_pager_write_page just use disk cache -data structures to get the right pointers to blocks. -disk_pager_read_page requests notification of page-out and updates -DC_INCORE and DC_REMAPPING too. DC_INCORE is set and DC_REMAPPING is -cleared (because reading the new block finishes its remapping). - -disk_pager_notify_pageout just clears DC_INCORE, making that page -available for remapping. - -* libpager changes - -Here memory_object_data_ prefix is shorten to m_o_d_. And when it's -talked about m_o_d_function Mach function, usually its libpager -handler is meant. - -** Notification on eviction - -The most important change that is wanted from libpager is supporting -notification when page is evicted. Mach already has partial support -for notification on eviction by argument "kcopy" of m_o_d_return. If -kcopy is 0, then Mach doesn't have copy of this page anymore, so the -page is "evicted". The problem is that m_o_d_return is usually called -only when page is modified, and if it's not modified, it's silently -dropped. - -The solutions is marking page as "precious". This has the exact -semantics we need: when page is evicted, m_o_d_return callback is -always called with kcopy=0. - -*** Implementation details - -New argument is added to user callback pager_read_page: -notify_on_pageout. If it's non-zero and the page is evicted, user -callback pager_notify_pageout(pager,page) is called. This change ABI -requires all libpager clients in the Hurd to be changed according to -the new API. - -m_o_d_request stores notify_on_pageout as flag PM_NOTIFY_PAGEOUT. - -m_o_d_return no longer just skips non-dirty pages. Local array -notified[] is build and at the end of the function, -pager_notify_pageout is called for all pages that are evicted -(kcopy=0). - -** Avoiding libpager optimization - -Unfortunately, there is one more problem, this time specific to -libpager, not Mach. There is an optimization in m_o_d_request when -page is being paged out. In the beginning of m_o_d_return, all pages -being return are marked as PM_PAGINGOUT. This mark is cleared after -m_o_d_supply (which supplies page content to Mach) is called. If -m_o_d_request is called on page that is marked as PM_PAGINGOUT, this -page is marked with PM_PAGEINWAIT, and m_o_d_supply inside -m_o_d_return is not called for this page. This is possible because -neither of these functions hold pager->interlock during the whole -execution of function. This lock is temporarily unlocked during call -to user callbacks pager_read_page and pager_write_page. - -So what is the implication of this optimization to our page eviction -notification? When page is paged out, we get notified and we can -decide to reuse it. After arranging disk_cache_info, etc, page is -touched, but if this happens fast enough, the optimization is -triggered and we get the old content! 
Reading the page is "optimized" -and pager_read_page is not called, but instead the content of old -block is used. - -This is solved by marking flushed and synced pages (via -pager_{flush,sync}{,_some} with PM_FORCEREAD. (These functions call -lock-object.c:_pager_lock_object which marks pages with PM_FORCEREAD -if they are already marked with PM_NOTIFY_PAGEOUT.) In handling -m_o_d_request, pages marked as PM_FORCEREAD are not optimized in this -way. XXX: Currently, this fine-grained logic is disabled (with #if), -as it needs more testing. Probably RC2 will use it. For now, all -pages are considered PM_FORCEREAD and this particular optimization -never happens. - -*** Technical details - -As said above, we need guarantee that after pager_{sync,flush}*, -pager_read_page callback is called. The most convenient place to mark -these pages as being forced to re-read is -lock-object.c:_pager_lock_object, because this function is used by all -pager_{sync,flush}* functions. So there we just mark page as -PM_FORCEREAD if it's already marked as PM_NOTIFY_PAGEOUT. - -First, this mark influences behaviour of m_o_d_request. If page is -marked with PM_FORCEREAD and PM_PAGINGOUT, then we set PM_PAGEINWAIT -and wait until related m_o_d_return finishes (unmarks PM_PAGEINWAIT). -Then we continue with pager_read_page, etc. If page is not marked -with PM_FORCEREAD and is marked with PM_PAGINGOUT, then old logic is -used and pager_read_page is not called (because m_o_d_return handler -will call m_o_d_supply instead of us). (XXX: Again, this logic is -inside #if 0. Currently, all pages are considered as marked with -PM_FORCEREAD.) - -The other place where PM_FORCEREAD is taken into consideration is -handler of m_o_d_return. The original code checks if page is marked -with PM_PAGEINWAIT, and if it is, m_o_d_supply is called for the just -written page. PM_PAGEINWAIT is used as "delegator" of the -m_o_d_supply call to Mach. - -In patched libpager, there is one more condition for when to call -m_o_d_supply. It's called when page is marked as PM_PAGEINWAIT and -not marked as PM_FORCEREAD. If it's marked as PM_FORCEREAD, then we -leave m_o_d_supply to m_o_d_request handler which gets notified by -condition pager->wakeup. diff --git a/hurd/translator/ext2fs/ogi-fosdem2005.mgp b/hurd/translator/ext2fs/ogi-fosdem2005.mgp deleted file mode 100644 index 27b5077c..00000000 --- a/hurd/translator/ext2fs/ogi-fosdem2005.mgp +++ /dev/null @@ -1,165 +0,0 @@ -# "Supporting Larger ext2 File Systems in the Hurd" -# Written by Ognyan Kulev for presentation at FOSDEM 2005. -# Content of this file is in public domain. 
-%include "default.mgp" -%page -%nodefault -%center, font "thick", size 5 - - - - -Supporting Larger ext2 File Systems in the Hurd - - - -%font "standard", size 4 -Ognyan Kulev -%size 3 -<ogi@fmi.uni-sofia.bg> - - -%size 4 -FOSDEM 2005 - -%page - -Need for supporting larger file systems - - Active development during 1995-1997 - - Hurd 0.2 was released in 1997 and it was very buggy - - Many bugs are fixed since then - - The 2G limit for ext2 file systems becomes more and more annoying - -%page - -Timeline - - 2002: Time for graduating, fixing the 2G limit in Hurd's ext2fs and implementing ext3fs were chosen for MSc thesis - - 2003: First alfa quality patch - - 2004: Graduation, ext2fs patch in Debian, but ext3fs is unstable - -%page - -User pager in GNU Mach - - Address space - memory_object_data_supply - memory_object_data_return - Memory object (Mach concept) - pager_read_page - pager_write_page - User-supplied backstore (libpager concept) - -%page - -Current ext2fs - - Memory mapping of the whole store - - Applies only for metadata! - - bptr (block -> data pointer) - = image pointer + block * block_size - - Inode and group descriptor tables are used as if they are continous in memory - -%page - -Patched ext2fs, part one - - Address space region - mapping - Array of buffers - association - Store - - Association of buffers changes (reassocation) - - It's important reassociation to occur on buffers that are not in core - -%page - -Patched ext2fs, part two - - Always use buffer guarded by - disk_cache_block_ref (block -> buffer) - disk_cache_block_deref (release buffer) - - Buffer = data + reference count + flags (e.g. INCORE) - - Calling some functions implies releasing buffer: - pokel_add (pokels are list of dirty buffers) - record_global_poke (use pokel_add) - sync_global_ptr (sync immediately) - record_indir_poke (use pokel_add) - - Use ihash for mapping block to buffer - -%page - -When unassociated block is requested - - -%font "typewriter", size 4, cont -retry: - i = hint; - while (buffers[i] is referenced or in core) { - i = (i + 1) % nbuffers; - if (i == hint) { - return_unreferenced_buffers (); - goto retry; - } - } - hint = i + 1; - - deassociate (buffers[i]); - associate (buffers[i], block); - - return buffers[i]; - -%page - -Notification for evicted pages - - Notification is essential for optimal reassociation - - Precious pages in Mach - - Slight change to API and ABI of libpager is required - - Mach sometimes doesn't notify! - -%page - -Pager optimization - -1. Mach returns page to pager without leaving it in core - -2. Pager becomes unlocked because of calling callback pager_write_page - -3. User task touches the page - -4. Mach requests the same page from pager - -5. XXX Pager supplies the page that was returned by Mach, instead of calling callback pager_read_page - -%page - -Future directions - - Committing in the Hurd :-) - Block sizes of 1K and 2K - Run-time option for buffer array size (?) 
- Compile-time option for memory-mapping the whole store - Upgrade of UFS - Extended attributes (EAs) and Access control lists (ACLs) - -# Local Variables: -# mgp-options: "-g 640x480" -# End: diff --git a/hurd/translator/fatfs.mdwn b/hurd/translator/fatfs.mdwn deleted file mode 100644 index 006fac0b..00000000 --- a/hurd/translator/fatfs.mdwn +++ /dev/null @@ -1,13 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -The current `fatfs` translator is read-only, and it has a severe bug: -[[!GNU_Savannah_bug 25961]]. diff --git a/hurd/translator/gopherfs.mdwn b/hurd/translator/gopherfs.mdwn deleted file mode 100644 index 6c32430f..00000000 --- a/hurd/translator/gopherfs.mdwn +++ /dev/null @@ -1,16 +0,0 @@ -[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -`gopherfs` is a virtual filesystem allowing you to access Gopher sites. - - -# Source - -incubator, gopherfs/master diff --git a/hurd/translator/hello.mdwn b/hurd/translator/hello.mdwn deleted file mode 100644 index bd56cd76..00000000 --- a/hurd/translator/hello.mdwn +++ /dev/null @@ -1,14 +0,0 @@ -[[!meta copyright="Copyright © 2011 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -The *hello* translator is an example of a simple [[libtrivfs]]-based one-node -[[translator]]. It is shipped as part of the [[Hurd source code -repository|source_repositories]], and exists in a single-threaded and a -multi-threaded variant. diff --git a/hurd/translator/hostmux.mdwn b/hurd/translator/hostmux.mdwn deleted file mode 100644 index 5fab2dc5..00000000 --- a/hurd/translator/hostmux.mdwn +++ /dev/null @@ -1,31 +0,0 @@ -Multiplexes arbitrary host names, making access to many differnt host fast and easy. - -For each host accessed via a directory an new translator is started with the hostname as option. Say, /hostmuxdemo should let you access your favourite host with your translator mytranslatorfs. - -<code>**ls /hostmuxdemo/mybox/**</code> would give the result of mytranslatorfs applied to host mybox. 
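For instance, the demo node described above might be set up along these lines. This is a minimal sketch only: `/hostmuxdemo` and `/hurd/mytranslatorfs` are the hypothetical names used on this page, not translators shipped with the Hurd.

    # settrans -c /hostmuxdemo /hurd/hostmux /hurd/mytranslatorfs
    $ ls /hostmuxdemo/mybox/

Hostmux would then start `/hurd/mytranslatorfs mybox` on demand the first time `mybox` is looked up, as described in the Usage section below.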
- -## <a name="Usage"> Usage </a> - -Hostmux takes a translator and its options as arguments and (in the easiest case) starts that translator with the given arguments and the hostname as the last argument. - -### <a name="ftpfs"> ftpfs </a> - -ftpfs is a good example that is also very useful. With hostmux and ftpfs you can access anonymous FTP via the filesystem, sparing you the more complicated use of an FTP client. - -We assume you want to access the FTP root on all servers. The example host is ftp.yourbox.com. - -Hostmux is called via <code>**settrans -fgap /ftp /hurd/hostmux /hurd/ftpfs /**</code> . - -* <code>**-fg**</code> makes settrans try hard to remove an existing old translator from <code>**/ftp**</code> -* <code>**ap**</code> sets an active translator (starts the translator) and a passive translator (stores translator information in the filesystem with which an active translator can be started on access of this node) -* <code>**/ftp**</code> is where we want to set the translator -* <code>**/hurd/hostmux**</code> is obviously our hostmux translator that will be started at <code>**/ftp**</code> and handle filesystem operations on <code>**/ftp**</code> and everything below (like <code>**/ftp/ftp.yourbox.com/pub/**</code>) -* <code>**/hurd/ftpfs /**</code> is the argument to hostmux. - -When <code>**/ftp**</code> is accessed, the first directory is interpreted as a hostname and a new translator is set up with the <code>**hostmux**</code> arguments: - -<code>**ls /ftp/ftp.yourhost.com/pub/**</code> lets hostmux start a new translator <code>**/hurd/ftpfs / ftp.yourhost.com**</code> and serve it via <code>**/ftp/ftp.yourhost.com/**</code> as a directory. Subsequently, the directory <code>**pub/**</code> on <code>**/ftp.yourhost.com/**</code> can be accessed via the newly created translator. - -You can see the newly created translator in the process list: <code>**ps ax | grep ftpfs**</code> . You should see <code>**/hurd/ftpfs / ftp.yourhost.com**</code> . - --- [[Main/PatrickStrasser]] - 13 Jul 2004 diff --git a/hurd/translator/libguestfs.mdwn b/hurd/translator/libguestfs.mdwn deleted file mode 100644 index 649b31f5..00000000 --- a/hurd/translator/libguestfs.mdwn +++ /dev/null @@ -1,15 +0,0 @@ -[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - -[libguestfs](http://libguestfs.org/) is said to be able to access a lot -of different filesystem types -- can we use it to build GNU Hurd -[[translator]]s? (There is a [[FUSE]] module, too.)
diff --git a/hurd/translator/magic.mdwn b/hurd/translator/magic.mdwn deleted file mode 100644 index 84bacdfb..00000000 --- a/hurd/translator/magic.mdwn +++ /dev/null @@ -1,22 +0,0 @@ -[[!meta copyright="Copyright © 2006, 2007, 2008, 2010 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -The magic translator provides `/dev/fd`. - - $ showtrans /dev/fd - /hurd/magic --directory fd - -The `/dev/fd` directory holds the open [[unix/file_descriptor]]s for your -current process. You can't see them with `ls -l /dev/fd/` but you can see them -individually like this: - - $ ls -l /dev/fd/0 - crw--w---- 1 bing tty 0, 0 Nov 19 18:00 /dev/fd/0 diff --git a/hurd/translator/mboxfs.mdwn b/hurd/translator/mboxfs.mdwn deleted file mode 100644 index e357294f..00000000 --- a/hurd/translator/mboxfs.mdwn +++ /dev/null @@ -1,11 +0,0 @@ -[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -<http://www.nongnu.org/hurdextras/#mboxfs> diff --git a/hurd/translator/netio.mdwn b/hurd/translator/netio.mdwn deleted file mode 100644 index aca9cd69..00000000 --- a/hurd/translator/netio.mdwn +++ /dev/null @@ -1,17 +0,0 @@ -[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -`netio` is a translator designed for creating socket ports through the -filesystem. - - -# Source - -incubator, netio/master diff --git a/hurd/translator/nfs.mdwn b/hurd/translator/nfs.mdwn deleted file mode 100644 index bf24370a..00000000 --- a/hurd/translator/nfs.mdwn +++ /dev/null @@ -1,18 +0,0 @@ -[[!meta copyright="Copyright © 2012 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -Translator acting as a NFS client. 
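A typical client setup might look like the following sketch. This is only an illustration: `nfs.example.com:/export` and `/mnt` are placeholders, and the exact option syntax should be checked against `/hurd/nfs --help`.

    # settrans -ac /mnt /hurd/nfs nfs.example.com:/export
    $ ls /mnt

The remote file system would then appear under `/mnt` like any other translated node.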
- - -# See Also - - * [[libnetfs: `io_map`|open_issues/libnetfs_io_map]] issue - - * [[open_issues/libnfs]] diff --git a/hurd/translator/nsmux.mdwn b/hurd/translator/nsmux.mdwn deleted file mode 100644 index d156772b..00000000 --- a/hurd/translator/nsmux.mdwn +++ /dev/null @@ -1,121 +0,0 @@ -[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -# nsmux - -`nsmux` implements the simplest use-case of namespace-based translator -selection (see below). - -To use `nsmux` do the following: - - $ settrans -a <node> nsmux <directory> - -After this operation `<node>` will be a mirror of `<directory>` with -namespace-based translator selection functionality enabled. - -Please note that due to some details `nsmux` may complain a lot when -run as a normal user. This matter is the most urgent on the TODO -list. - -## Source - -`nsmux` translator can be obtained with the following series of -commands: - - $ git clone git://git.sv.gnu.org/hurd/incubator.git nsmux - $ cd nsmux/ - $ git checkout -b nsmux origin/nsmux - -`filter` translator can be obtained with the following series of -commands: - - $ git clone git://git.sv.gnu.org/hurd/incubator.git filter - $ cd filter/ - $ git checkout -b filter origin/filter - -The filter is not yet working. - -## Namespace-based Translator Selection - -Namespace-based translator selection is the special technique of using -"magic" filenames for both accessing the file and setting translators -on it. - -A "magic" filename is a filename which contains an unescaped sequence -of two commas: ",,". This sequence can be escaped by adding another -comma: ",,,". In the magic filename the part up to the first double -commas is interpreted as the filename itself; the remaining segments -into which the string is split by occurrences of ",," are treated as -names of translators located under `/hurd/`. - -The simplest advantage before traditional way of setting -translators is shown in the following examples. Compare this - - $ settrans -a file translator1 - $ settrans -a file translator2 - $ cat file - -to this: - - $ cat file,,translator1,,translator2 - -One simple command versus three more lengthy ones is an obvious -improvement. However, this advantage is not the only one and, -probably, not even the most important. - -What is a good candidate for the most important advantage is that -translators requested via "magic" filenames are session-bound. In -other words, by running `cat file,,translator` we set a translator -visible *only* to `cat`, while the original file remains untranslated. -Such session-specific translators are called **dynamic** and there is -no (theoretical) way for a client to get a port to a dynamic -translator requested by another client. - -Obviously, dynamic translators can be stacked, similarly to static -translators. Also, dynamic translator stacks may reside on top of -static translator stacks. - -An important operation of namespace-based translator selection is -*filtering*. 
Filtering basically consists in looking up a translator -by name in the stack and ignoring translators located on top of it. -Note that filtering does not mean dropping some translators: in the -current implementation a filter is expected to be a normal dynamic -translator, included in the dynamic translator stack similarly to -other translators. - -An important detail is that filtering is not limited to dynamic -translator stacks: a filter should be able to descend into static -translator stacks as well. - -Although the concept of filtering may seem purely abstract in the -simplest use-case of setting dynamic translators on top of files, the -situation changes greatly when dynamic translator stacks on top of -directories are considered. In this case, the implementation of -namespace-based translator selection is expected to be able to -propagate the dynamic translators associated with the directory down -the directory structure. That is, all files located under a directory -opened with magic syntax are expected to be translated by the same -set of translators. In this case having the possibility to -specifically discard some of the translators set up on top of certain -files is very useful. - -Note that the implementation of propagation of dynamic translators -down directories has not been fully worked out at the moment. The -fundamental problem is distinguishing whether the -dynamic translators are to be set on the underlying files of the -directory or on the directory itself. - -## Currently Implemented - -Currently there is a working (though not heavily tested) implementation -of the simplest use-case of namespace-based translator selection in -the form of the translator `nsmux`. The filter is partially implemented -and this is the immediate goal. Propagating translators down -directories is the next objective. diff --git a/hurd/translator/pfinet.mdwn b/hurd/translator/pfinet.mdwn deleted file mode 100644 index f6f69ea4..00000000 --- a/hurd/translator/pfinet.mdwn +++ /dev/null @@ -1,38 +0,0 @@ -[[!meta copyright="Copyright © 2002, 2004, 2005, 2007, 2008, 2011 Free Software -Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -To get Internet connectivity, the `pfinet` (*Protocol Family Internet*) -[[translator]] must be configured. This is done using the -[[`settrans`|settrans]] command, for example like this: - - # settrans -fgap /servers/socket/2 /hurd/pfinet ↩ - -i eth0 -a 192.168.0.50 -g 192.168.0.1 -m 255.255.255.0 - -The argument `/servers/socket/2` is the node that the translator is to be -attached to. This is followed by the translator program to run and any -arguments to give it. - -There, `-i`, `-a`, `-g` and `-m` are, quite obviously, the (Mach) device to -use, the IP address, the gateway and the netmask. - ---- - -To make DNS lookups work, you'll also have to properly configure the -`/etc/resolv.conf` file, for example by copying it over from your GNU/Linux -installation (a minimal example follows after the list below). - ---- - - * [[DHCP]]. - - * [[Implementation]]. - - * [[IPv6]].
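Following up on the `/etc/resolv.conf` note above, a minimal static configuration can be as simple as the following sketch. It assumes the gateway `192.168.0.1` from the example above also acts as the name server, which need not be true for your network.

    # echo 'nameserver 192.168.0.1' > /etc/resolv.conf

Copying the file from an existing GNU/Linux installation works just as well.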
diff --git a/hurd/translator/pfinet/dhcp.mdwn b/hurd/translator/pfinet/dhcp.mdwn deleted file mode 100644 index 456d0c84..00000000 --- a/hurd/translator/pfinet/dhcp.mdwn +++ /dev/null @@ -1,46 +0,0 @@ -[[!meta copyright="Copyright © 2002, 2003, 2005, 2011 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - -[[Debian GNU/Hurd|running/debian]] has some script hackery to get -[[running/debian/DHCP]] going. - ---- - -According to the following thread, no port should be needed since all the patches that have been applied, including the one concerning the thread. In fact, the thread finishes without concluding whether the patch has been applied or not. You can grab it in the thread, anyway. - -[Link to thread](http://lists.gnu.org/archive/html/bug-hurd/2005-01/msg00025.html) - -The thread starts at Jan 4th 2005 until Jan 6th and is only retaken at April 14th in [this thread](http://lists.gnu.org/archive/html/bug-hurd/2005-01/msg00025.html). - -[This](http://mail.gnu.org/archive/html/help-hurd/2003-10/msg00016.html) thread on help-hurd has a little more info on what's still needed for DHCP. - -Found this [message](http://mail.gnu.org/archive/html/bug-hurd/2003-08/msg00045.html) about DHCP capabilities in the Hurd encouraging. - -* Tom Hart began a [discussion ](http://mail.gnu.org/pipermail/help-hurd/2002-October/006643.html) of 14 posts in Oct 2002. - -The beginnings of a DHCP translator is available in the Hurd sources on Savannah: [hurd/trans/pump.c](http://savannah.gnu.org/cgi-bin/viewcvs/hurd/hurd/trans/pump.c?rev=1.3&content-type=text/vnd.viewcvs-markup) - -Unfortunately our current TCP/IP stack, the pfinet translator, lacks support for the AF\_PACKET interface as well as sending packets with an IP address of 0.0.0.0. - -Grant Bowman on bug-hurd: - - Herbert Xu (Pump maintainer) told me that to operate correctly, pump - uses the AF_PACKET interface which is only present in 2.2. - - Anyone else know the status of getting these compiled and functional? - -Neal Walfield on bug-hurd replies: - - > Anyone else know the status of getting these compiled and functional? - - We need to be able to send to the DHCP server with ip address 0.0.0.0. diff --git a/hurd/translator/pfinet/implementation.mdwn b/hurd/translator/pfinet/implementation.mdwn deleted file mode 100644 index 50b5dfc2..00000000 --- a/hurd/translator/pfinet/implementation.mdwn +++ /dev/null @@ -1,13 +0,0 @@ -[[!meta copyright="Copyright © 2000, 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. 
A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -The `pfinet` server is a hacked Linux internet implementation with a glue layer -translating between the Hurd [[RPC]]s and the middle layer of the Linux -implementation. diff --git a/hurd/translator/pfinet/ipv6.mdwn b/hurd/translator/pfinet/ipv6.mdwn deleted file mode 100644 index 5afee0c6..00000000 --- a/hurd/translator/pfinet/ipv6.mdwn +++ /dev/null @@ -1,57 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008, 2010 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -[[Stefan_Siegl|stesie]] has added IPv6 support to the pfinet [[translator]]. -This was [Savannah task #5470](http://savannah.gnu.org/task/?5470). - - -# Implementation - -Because the IPv4 and IPv6 protocols are -quite related to each other (think of mapped IPv4 addresses, etc.), there is no -separate [[server|translator]] for IPv6 but support for the latter has been -incorporated into the common pfinet. Unfortunately it's a little bit clumsy -now to set the [[translator]] up, since it has to be bound to -*/servers/socket/2* (like before) as well as */servers/socket/26* (for IPv6). - -To achieve this, you can tell pfinet to install [[active_translators|active]] -on specified nodes, using **-4** and **-6** options. This is, you have to -install a [[passive_translator|passive]] on */servers/socket/2* that also binds -the IPv6 port and vice versa. - - -# Examples - -Normal IPv4 network setup, address 192.168.7.23/24 and gateway 192.168.7.1. -IPv6 address shall be assigned using IPv6 auto-configuration. - - settrans -fgp /servers/socket/2 ↩ - /hurd/pfinet -6 /servers/socket/26 ↩ - -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1 - settrans -fgp /servers/socket/26 ↩ - /hurd/pfinet -4 /servers/socket/2 ↩ - -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1 - -Quite the same, but with static IPv6 address assignment: - - settrans -fgp /servers/socket/2 ↩ - /hurd/pfinet -6 /servers/socket/26 ↩ - -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1 ↩ - -A 2001:4b88:10e4:0:216:3eff:feff:4223/64 -G 2001:4b88:10e4::1 - settrans -fgp /servers/socket/26 ↩ - /hurd/pfinet -4 /servers/socket/2 ↩ - -i eth0 -a 192.168.7.23 -m 255.255.255.0 -g 192.168.7.1 ↩ - -A 2001:4b88:10e4:0:216:3eff:feff:4223/64 -G 2001:4b88:10e4::1 - - -# Missing Functionality - -Amongst other things, support for [[IOCTL]]s is missing. diff --git a/hurd/translator/pflocal.mdwn b/hurd/translator/pflocal.mdwn deleted file mode 100644 index dc2434dc..00000000 --- a/hurd/translator/pflocal.mdwn +++ /dev/null @@ -1,13 +0,0 @@ -[[!meta copyright="Copyright © 2000, 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. 
A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -The implementation of the `pflocal` server is in the `pflocal` directory, and -uses [[`libpipe`|libpipe]] (shared code with the [[named_pipe|fifo]] -implementation). diff --git a/hurd/translator/procfs.mdwn b/hurd/translator/procfs.mdwn deleted file mode 100644 index 70448e94..00000000 --- a/hurd/translator/procfs.mdwn +++ /dev/null @@ -1,35 +0,0 @@ -[[!meta copyright="Copyright © 2008, 2009, 2010 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -Although there is no standard (POSIX or other) for the layout of the `/proc` -pseudo-filesystem, it turned out a very useful facility in GNU/Linux and other -systems, and many tools concerned with process management use it. (`ps`, `top`, -`htop`, `gtop`, `killall`, `pkill`, ...) - -Instead of porting all these tools to use [[libps]] (Hurd's official method for -accessing process information), they could be made to run out of the box, by -implementing a Linux-compatible `/proc` filesystem for the Hurd. - -The goal is to implement all `/proc` functionality needed for the various process -management tools to work. (On Linux, the `/proc` filesystem is used also for -debugging purposes; but this is highly system-specific anyways, so there is -probably no point in trying to duplicate this functionality as well...) - -*Status*: Madhusudan.C.S has implemented a new, fully functional [[procfs|madhusudancs]] for -[[GSoC 2008|community/gsoc/2008]]. - - -# [[New Implementation by Jérémie Koenig|jkoenig]] - - -# Old Implementation from [[open_issues/HurdExtras]] - -<http://www.nongnu.org/hurdextras/#procfs> diff --git a/hurd/translator/procfs/jkoenig.mdwn b/hurd/translator/procfs/jkoenig.mdwn deleted file mode 100644 index 9543b658..00000000 --- a/hurd/translator/procfs/jkoenig.mdwn +++ /dev/null @@ -1,23 +0,0 @@ -[[!meta copyright="Copyright © 2010, 2011 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -In August 2010, Jérémie Koenig [published another, new -version](http://lists.gnu.org/archive/html/bug-hurd/2010-08/msg00165.html). -This can be found in <http://git.savannah.gnu.org/cgit/hurd/procfs.git/>, -branch *jkoenig/master*. 
- -Testing it is as simple as this: - - $ git clone git://git.savannah.gnu.org/hurd/procfs.git - $ cd procfs/ - $ git checkout jkoenig/master - $ make - $ settrans -ca proc procfs --compatible - $ ls -l proc/ diff --git a/hurd/translator/procfs/jkoenig/discussion.mdwn b/hurd/translator/procfs/jkoenig/discussion.mdwn deleted file mode 100644 index 339fab50..00000000 --- a/hurd/translator/procfs/jkoenig/discussion.mdwn +++ /dev/null @@ -1,220 +0,0 @@ -[[!meta copyright="Copyright © 2010, 2011 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - -[[!toc]] - - -# Miscellaneous - -IRC, #hurd, around September 2010 - - <youpi> jkoenig: from a quick read, your procfs implementation seems quite - simple, probably much more what I was expecting from Madhusudan (who - probably now hates you :) ) - <youpi> jkoenig: is it not possible to provide a /proc/self which points at - the client's pid? - <pinotree> (also, shouldn't /proc/version say something else than "Linux"?) - <youpi> to make linux tools work, no :/ - <youpi> kfreebsd does that too - <pinotree> really? - <youpi> yes - <youpi> (kfreebsd, not freebsd) - <pinotree> does kbsd's one print just "Linux version x.y.z" too, or - something more eg in a second line? - <pinotree> (as curiosity) - <youpi> % cat /proc/version - <youpi> Linux version 2.6.16 (des@freebsd.org) (gcc version 4.3.5) #4 Sun - Dec 18 04:30:00 CET 1977 - <pinotree> k - <giselher> I had some problems with killall5 to read the pid from /proc, Is - this now more reliable? - <youpi> I haven't tested with jkoenig's implementation - [...] - <pinotree> looks like he did 'self' too, see rootdir_entries[] in rootdir.c - <youpi> but it doesn't point at self - <antrik> youpi: there is no way to provide /proc/self, because the server - doesn't know the identity of the client - <youpi> :/ - <antrik> youpi: using the existing mechanisms, we would need another magic - lookup type - <antrik> an alternative idea I discussed with cfhammer once would be for - the client to voluntarily provide it's identity to the server... but that - would be a rather fundamental change that requires careful consideration - <antrik> also, object migration could be used, so the implementation would - be provided by the server, but the execution would happen in the - client... but that's even more involved :-) - <youpi> but we've seen how much that'd help with a lot of other stuff - <antrik> I'm not sure whether we discussed this on the ML at some point, or - only on IRC - <youpi> it "just" needs to be commited :) - <antrik> in either case, it can't hurt to bring this up again :-) - - -# root group - -IRC, #hurd, around October 2010 - - <pinotree> the only glitch is that files/dirs have the right user as - owner, but always with root group - - -# `/proc/$pid/stat` being 400 and not 444, and some more - -IRC, freenode, #hurd, 2011-03-27 - - <pochu> is there a reason for /proc/$pid/stat to be 400 and not 444 like on - Linux? 
- <pochu> there is an option to procfs to make it 444 like Linux - <pochu> jkoenig: ^ - <jkoenig> pochu, hi - <jkoenig> /proc/$pid/stat reveals information which is not usually - available on Hurd - <jkoenig> so I made it 400 by default to avoid leaking anything - <pochu> is there a security risk in providing that info? - <jkoenig> probably not so much, but it seemed like it's not really a - descision procfs should make - <jkoenig> I'm not sure which information we're speaking about, though, I - just remember the abstract reason. - <pochu> things like the pid, the memory, the priority, the state... - <pochu> sounds safe to expose - <jkoenig> also it's 0444 by default in "compatible" mode - <jkoenig> (which is necessary for the linux tools to work well) - <pochu> yeah I saw that :) - <pochu> my question is, should we change it to 0444 by default? if there - are no security risks and this improves compatibility, sounds like a good - thing to me - <pochu> we're already 'leaking' part of that info through e.g. ps - <jkoenig> I think /proc should be translated by /hurd/procfs --compatible - by default (I'm not sure whether it's already the case) - <jkoenig> also I'm not sure why hurd-ps is setuid root, rather than the - proc server being less paranoid, but maybe I'm missing something. - <pochu> jkoenig: it's not, at least not on Debian - <pochu> youpi: hi, what do you think about starting procfs with - --compatible by default? - <pochu> youpi: or changing /proc/$pid/stat to 0444 like on Linux - (--compatible does that among a few other things) - <youpi> I guess you need it for something? - <pochu> I'm porting libgtop :) - <youpi> k - <pochu> though I still think we should do this in procfs itself - <youpi> ymmv - <jkoenig> pochu, youpi, --compatible is also needed because mach's high - reported sysconf(_SC_CLK_TCK) makes some integers overflow (IIRC) - <youpi> agreed - <jkoenig> luckily, tools which use procfs usually try to detect the value - /proc uses rather than rely on CLK_TCK - <jkoenig> (so we can choose whatever reasonable value we want) - -IRC, freenode, #hurd, 2011-03-28 - - <antrik> jkoenig: does procfs expose any information that is not available - to everyone through the proc server?... - <antrik> also, why is --compatible not the default; or rather, why is there - even another mode? the whole point of procfs is compatibility... - <jkoenig> antrik, yes, through the <pid>/environ and (as mentionned above) - <pid>/stat files, but I've been careful to make these files readable only - to the process owner - <jkoenig> --compatible is not the default because it relaxes this paranoia - wrt. the stat file, and does not conform to the specification with regard - to clock tick counters - <antrik> what specification? - <jkoenig> the linux proc(5) manpage - <jkoenig> which says clock tick counters are in units of - 1/sysconf(_SC_CLK_TCK) - <antrik> so you are saying that there is some information that the Hurd - proc server doesn't expose to unprivileged processes, but linux /proc - does? - <jkoenig> yes - <antrik> that's odd. I wonder what the reasoning behind that could be - <antrik> but this information is available through Hurd ps? - <antrik> BTW, what exactly is _SC_CLK_TCK supposed to be? 
- <pinotree> jkoenig: hm, just tried with two random processes on linux - (2.6.32), and enrivon is 400 - <pinotree> (which makes sense, as you could have sensible informations eg - in http_proxy or other envvars) - <jkoenig> antrik, CLK_TCK is similar to HZ (maybe clock resolution instead - of time slices ?) - <jkoenig> sysconf(3) says "The number of clock ticks per second." - <jkoenig> antrik, I don't remember precisely what information this was, but - ps-hurd is setuid root. - <jkoenig> anyway, if you run procfs --compatible as a user and try to read - foo/1/stat, the result is an I/O error, which is the result of the proc - server denying access. - <antrik> but Linux /proc acutally uses HZ as the unit IIRC? or is - _SC_CLK_TCK=HZ on Linux?... - <jkoenig> I expect they're equal. - <jkoenig> in practice procps uses heuristics to guess what value /proc uses - (for compatibility purposes with older kernels) - <jkoenig> I don't think HZ is POSIX, while _SC_CLK_TCK is specifies as the - unit for (at least) the values returned by times() - <jkoenig> s/specifies/specified/ - <jkoenig> antrik, some the information is fetched directly from mach by - libps, and understandably, the proc server does not give the task port to - anyone who asks. - <antrik> well, as long as the information is exposed through ps, there is - no point in hiding it in procfs... - <antrik> and I'm aware of the crazy guessing in libproc... I was actually - mentoring the previous procfs implementation - <antrik> (though I never got around to look at his buggy code...) - <jkoenig> ok - -IRC, freenode, #hurd, 2011-07-22 - - <pinotree> hm, why /proc/$pid/stat is 600 instead of 644 of linux? - <jkoenig> pinotree, it reveals information which, while not that sensitive, - would not be available to users through the normal proc interface. - <jkoenig> (it's available through the ps command which is setuid root) - <jkoenig> we discussed at some point making it 644, IIRC. - <pinotree> hm, then why is it not a problem on eg linux? - <jkoenig> (btw you can change it with the -s option.) - <jkoenig> pinotree, it's not a problem because the information is not that - sensitive, but when rewriting procfs I preferred to play it self and - consider it's not procfs' job to decide what is sensitive or not. - <jkoenig> IIRC it's not sensitive but you need the task port to query it. - <jkoenig> like, thread times or something. - <pinotree> status is 644 though - <jkoenig> but status contains information which anyone can ask to the proc - server anyway, I think. - - -# `/proc/mounts`, `/proc/$pid/mounts` - -IRC, freenode, #hurd, 2011-07-25 - - < pinotree> jkoenig: btw, what do you think about providing empty - /proc/mounts and /proc/$pid/mounts files? - < jkoenig> pinotree, I guess one would have to evaluate the consequences - wrt. existing use cases (in other words, "I have absolutely no clue - whatsoever about whether that would be desirable" :-) - < jkoenig> pinotree, the thing is, an error message like "/proc/mounts: No - such file or directory" is rather explicit, whereas errors which would be - caused by missing data in /proc/mounts would maybe be harder to track - < braunr> this seems reasonable though - < braunr> there already are many servers with e.g. grsecurity or chrooted - environments where mounts is empty - < pinotree> well, currently we also have an empty mtab - < braunr> pinotree: but what do you need that for ? - < braunr> pinotree: the init system ? 
- < pinotree> and the mnt C api already returns no entries (or it bails out, - i don't remember) - < pinotree> not a strict need - - -# `/proc/[PID]/auxv`, `/proc/[PID]/exe`, `/proc/[PID]/mem` - -Needed by glibc's `pldd` tool (commit -11988f8f9656042c3dfd9002ac85dff33173b9bd). - - -# `/proc/self/exe` - -[[!message-id "alpine.LFD.2.02.1110111111260.2016@akari"]] diff --git a/hurd/translator/random.mdwn b/hurd/translator/random.mdwn deleted file mode 100644 index afb76953..00000000 --- a/hurd/translator/random.mdwn +++ /dev/null @@ -1,70 +0,0 @@ -[Savannah task #5130: random translator](http://savannah.gnu.org/task/?5130) - -See the attached [[mbox.bz2]] containing all the emails concerning this topic -which I was able to gather from public archives. (!) This is not up-to-date -anymore, as [[MichaelCasadevall]] is currently working on this. - -# Description - -Sources of entropy are for example disk access latencies or keystroke patterns -or behavior on networks. This suggests that for implementing a random -translator a kernel part is needed as well, to gather that entropy. That -kernel part would then export the gathered entropy via a kernel device, named -perhaps `entropy`. - -# Setup Pseudo Random Devices - -Stuck getting SSH to work? You need a pseudo random generator (PRG). - -There are several solutions to the lack of `/dev/random` and `/dev/urandom`, -but they are not yet in the default installation. - -* Marcus' work can be downloaded at - [random.tar.gz](ftp://alpha.gnu.org/gnu/hurd/contrib/marcus/random.tar.gz). - (Identical to <http://kilobug.free.fr/hurd/random-64.tar.gz>?) - * [A patch](http://mail.gnu.org/pipermail/bug-hurd/2002-August/010248.html) - that was probably already incorporated from August 14, 2002. - * Clemmitt Sigler [reported - success](http://lists.gnu.org/archive/html/help-hurd/2002-10/msg00076.html) - October 11, 2002 and Marcus [described - some](http://lists.gnu.org/archive/html/help-hurd/2002-10/msg00081.html) of - the internals. - -* [Entropy Gathering Daemon](http://egd.sourceforge.net/). - * [request for packaging](http://bugs.debian.org/145498). - -* [OSKit Entropy - Patch](http://lists.gnu.org/archive/html/bug-hurd/2003-01/msg00000.html) from - Derek Davies - Jan 2003. - * See also [this page](http://www.ddavies.net/oskit-entropy/). - * Note that this patch can (and should) be used with this [OSKit NIC - patch](ftp://flux.cs.utah.edu/flux/oskit/mail/html/oskit-users/msg01570.html). - -* [Sune Kirkeby's incomplete port of the Linux /dev/\{,u\}random device - driver](http://ibofobi.dk/stuff/hurd-entropy/) - * [The files](http://download.ibofobi.dk/hurd-entropy/), including a [patch - for GNU - Mach](http://download.ibofobi.dk/hurd-entropy/gnumach-entropy.diff.bz2). - -* Quick and dirty way: - - sudo cp /bin/bash /dev/random - sudo ln -s random /dev/urandom - ---- - -# Setup Tips - -Here are some tips on how to actually setup the two random devices using -Kilobugs' [random-64 server](http://kilobug.free.fr/hurd/random-64.tar.gz). -His tarball is a complete Hurd server including a pre-built binary - so you -don't need GCC or magic fingers for this! :) - -After untaring the package you copy the random binary to the `/hurd` -directory. Then you setup the translators for random and urandom. 
- - # settrans -c /dev/random /hurd/random \ - --seed-file /var/run/random-seed --secure - # settrans -c /dev/urandom /hurd/random \ - --seed-file /var/run/urandom-seed --fast - # chmod 0644 /dev/random /dev/urandom diff --git a/hurd/translator/random/mbox.bz2 b/hurd/translator/random/mbox.bz2 Binary files differdeleted file mode 100644 index a9a4d4a6..00000000 --- a/hurd/translator/random/mbox.bz2 +++ /dev/null diff --git a/hurd/translator/short-circuiting.mdwn b/hurd/translator/short-circuiting.mdwn deleted file mode 100644 index 9de9f7b8..00000000 --- a/hurd/translator/short-circuiting.mdwn +++ /dev/null @@ -1,88 +0,0 @@ -[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -In traditional [[Unix]], file systems contain special files. These are: -symbolic links, character devices, block devices, named pipes, and -named sockets. Naturally the Hurd also support these. - -However, if you take a look at `hurd/io.defs` and `hurd/fs.defs`, you'll -find that there are no [[RPC]]s that deal specifically with these types. -Sure, you can get the type of the file through `io_stat` (among other -things), but there are none that e.g. lets you create a symbolic link. - -If you take a look at how [[glibc]] implements `symlink`, you'll notice -that all it does is create a new file and set its passive translator to -`/hurd/symlink DEST`. You can verify this yourself by creating a symlink -with `ln -s foo bar` and print its passive translator setting with `showtrans -bar`. - -This is how the other special files are implemented as well. The header -`hurd/paths.h` contains a list of paths that are used to implement -special files: - - * `/hurd/symlink` - * `/hurd/chrdev` - * `/hurd/blkdev` - * `/hurd/fifo` - * `/hurd/ifsock` - -So all special files are implemented through special-purpose translators, -right? Not quite, instead the translators of this list are often -implemented in their underlying filesystem through *translator -short-circuiting*. In fact, `chrdev` and `blkdev` aren't even implemented -as translators at all. - -Translator short-circuiting is when a file system server implements the -functionality of a passive translator itself, instead of actually starting -it. For instance, all the [[`symlink`|symlink]] translator does is return -a `FS_RETRY_*` reply to the caller. So instead of starting it, the file -system server can simply continue the file name look-up internally by -appending the target of the symbolic link to the path being looked-up. - -This way, we can skip starting the `symlink` translator, skip retrying -the look-up on the newly started translator, and we might also skip a -retry to the same file system server again, if the target of the symbolic -link is in it. - -In fact, the list's translators that actually are implemented (`symlink`, -`fifo`, `ifsock`) are only used as a default implementation if the underlying -file system's translator does not implement the functionality itself, i.e., if -it doesn't short-circuit it. 
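Spelled out from the shell, the verification mentioned above looks roughly like this small sketch (the exact `showtrans` output may vary with the file system in use):

    $ touch foo
    $ ln -s foo bar
    $ showtrans bar
    /hurd/symlink foo

On a file system such as `ext2fs` the link is actually stored as a native symlink inode, and the passive translator setting shown here is synthesized on look-up, as explained below.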
To make sure that you use one of these translators, thereby bypassing the -short-circuiting mechanism, you can either start it as -an active translator, or use a different path from the one in -`hurd/paths.h`, e.g. `settrans bar /hurd/./symlink foo`. - -The best example of how short-circuiting is implemented can be found -in [[`libdiskfs`|libdiskfs]]. Notice how, in `diskfs_S_file_set_translator`, it detects whether a translator to be stored -is a special file and, instead -of storing a real passive translator setting on the disk, stores it as a -symlink node (using `diskfs_create_symlink_hook` or a generic implementation). - -In later look-ups of the node, it checks the node's `stat` structure in -`diskfs_S_file_get_translator` or -`diskfs_S_dir_lookup` and handles special file types appropriately. - -This translator short-circuiting has disadvantages -- code duplication, or -more generally added code complexity that isn't needed for implementing the same -functionality -- but it also has advantages: using functionality that the file -system's data structures nevertheless already provide -- storing symbolic links -in `ext2fs`' inodes instead of storing passive translator settings -- and thus -staying compatible with other operating systems mounting that file system. - -Also, this short-circuiting conserves system resources, as it is no longer -required to start a `symlink` translator for resolving each symbolic link, and -it reduces the [[RPC]] overhead. - -It can also confuse users who expect the passive translator to actually start. -For instance, a user may notice that [[`symlink`|symlink]]'s code is -lacking some functionality, and yet be surprised that it unexpectedly works when they -try it, because the file system has short-circuited the translator. diff --git a/hurd/translator/storeio.mdwn b/hurd/translator/storeio.mdwn deleted file mode 100644 index 8e26a959..00000000 --- a/hurd/translator/storeio.mdwn +++ /dev/null @@ -1,30 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -`storeio` is a *translator for devices and other stores*. - -It relies heavily on [[libstore]].
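Once a node has been set up as shown in the Examples below, the store it exports can be inspected from the shell. This is only a sketch, assuming the `storeinfo` utility shipped with the Hurd:

    $ storeinfo node

which should report the store type, size and block size that `storeio` exposes on `node`.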
- - -# Examples - -You can make a file's content available as some block device (where `foo` is -the name of the file to map): - - settrans -ca node /hurd/storeio -T file foo - -You can even `ungzip` files on the fly (`bunzip2` is available as well): - - settrans -ca node /hurd/storeio -T gunzip foo.gz - -You can use the *typed store*, to create filter chains (of course this example -is kind of useless since you could use the `gunzip` store directly): - - settrans -ca node /hurd/storeio -T typed gunzip:file:foo.gz diff --git a/hurd/translator/storeio/discussion.mdwn b/hurd/translator/storeio/discussion.mdwn deleted file mode 100644 index 0766e0af..00000000 --- a/hurd/translator/storeio/discussion.mdwn +++ /dev/null @@ -1,16 +0,0 @@ -[[!meta copyright="Copyright © 2011, 2012 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - -IRC, freenode, #hurd, 2011-11-10: - - <pinotree> hm, is it normal that st_rdev for storeio translators of - /dev/hd* devices is 0? diff --git a/hurd/translator/stowfs.mdwn b/hurd/translator/stowfs.mdwn deleted file mode 100644 index 9c88f1a3..00000000 --- a/hurd/translator/stowfs.mdwn +++ /dev/null @@ -1,11 +0,0 @@ -[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -[[!meta redir=unionfs#stowfs]] diff --git a/hurd/translator/tarfs.mdwn b/hurd/translator/tarfs.mdwn deleted file mode 100644 index e25e3255..00000000 --- a/hurd/translator/tarfs.mdwn +++ /dev/null @@ -1,25 +0,0 @@ -[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -`tarfs` is a translator aimed at providing access to tar files through the -filesystem. This way you don't have to extract files from the archive to -access them. It supports compressed archives (bzip2 and gzip) through -[[libstore]]. - - -# Status - -Works fine on most cases, occasional corruptions when writing using bzip2/gzip -stores. 
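An invocation might look like the following sketch. The details are hedged: the option names are assumptions (check `/hurd/tarfs --help`), in particular `-z` for gzip-compressed archives, and `archive.tar.gz` is a placeholder.

    $ settrans -ca mnt /hurd/tarfs -z archive.tar.gz
    $ ls mnt/

The archive's members would then show up under `mnt/` without being extracted first.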
- - -# Source - -incubator, tarfs/master diff --git a/hurd/translator/tmpfs.mdwn b/hurd/translator/tmpfs.mdwn deleted file mode 100644 index 626fad86..00000000 --- a/hurd/translator/tmpfs.mdwn +++ /dev/null @@ -1,24 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008, 2009, 2011 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -`tmpfs` is a file system server for temporary data storage without using a real -(permanent) [[backing_store]]. - -It is based on [[libdiskfs]]. - -Even though there are other possibilities of creating a -[[libstore/examples/ramdisk]] and running a regular, for example, [[`ext2` file -system|ext2fs]] on it, having a real `tmpfs` is better, as it need not deal -with the additional block-level indirection layer that `ext2` (or any other -disk-based file system) imposes. - -However, `tmpfs` is not working correctly at the moment, see the [[discussion]] -sub-pages. There is a [[!FF_project 271]][[!tag bounty]] on this task. diff --git a/hurd/translator/tmpfs/discussion.mdwn b/hurd/translator/tmpfs/discussion.mdwn deleted file mode 100644 index bdee0f78..00000000 --- a/hurd/translator/tmpfs/discussion.mdwn +++ /dev/null @@ -1,430 +0,0 @@ -[[!meta copyright="Copyright © 2011, 2012 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - - * [[notes_bing]] - - * [[notes_various]] - - * [[tmpfs_vs_defpager]] - - * [[!GNU_Savannah_bug 26751]] - - * [[!GNU_Savannah_bug 32755]] - - -# [[Maksym_Planeta]] - -## IRC, freenode, #hurd, 2011-11-29 - - <mcsim> Hello. In seqno_memory_object_data_request I call - memory_object_data_supply and supply one zero filled page, but seems that - kernel ignores this call because this page stays filled in specified - memory object. In what cases kernel may ignore this call? It is written - in documentation that "kernel prohibits the overwriting of live data - pages". But when I called memory_object_lock_request on this page with - should flush and MEMORY_OBJECT_RETURN_ALL nothing change - <braunr> what are you trying to do ? - <mcsim> I think that memory object holds wrong data, so I'm trying to - replace them. This happens when file is truncated, so I should notify - memory object that there is no some data. But since gnumach works only - with sizes that are multiple of vm_page_size, I should manually correct - last page for case when file size isn't multiple of vm_page_size. 
This is - needed for case when file grows again and that tail of last page, which - wasn't part of file should be filled wit - <mcsim> I've put some printf's in kernel and it seems that page that holds - data which I want replace both absent and busy: - <mcsim> m = vm_page_lookup(object,offset); - <mcsim> ... - <mcsim> if (m->absent && m->busy) { <-- Condition is true - <mcsim> in vm/memory_object.c:169 - <slpz> mcsim: Receiving m_o_data_request means there's no page in the - memory object at that offset, so m_o_data_supply should work - <slpz> are you sure that page is not being installed into the memory - object? - <braunr> it seems normal it's both absent and busy - <braunr> absent because, as sergio said, the page is missing, and busy - because the kernel starts a transfer for its content - <braunr> i don't understand how you determine the kernel ignores your - data_supply - <braunr> "because this page stays filled in specified memory object" - <braunr> please explain this with more detail - <slpz> mcsim: anyway, when truncating a file to a non page-aligned length, - you can just zero fill the rest of the page by mapping the object and - writing to it with memset/bzero - <braunr> (avoid bzero, it's obsolete) - <mcsim> slpz: I'll try try it now. - <braunr> slpz: i think that's what he's trying to do - <mcsim> I don't vm_map it - <braunr> how do you zero it then ? - <braunr> "I call memory_object_data_supply and supply one zero filled page" - <mcsim> First I call mo_lock_request and ask to return this page, than I - memset tail and try to mo_data_supply - <mcsim> I use this function when I try to replace kr = - memory_object_data_supply(reply_to, offset, addr, vm_page_size, FALSE, - VM_PROT_NONE, FALSE, MACH_PORT_NULL); - <mcsim> where addr points to new data, offset points to old data in - object. and reply_to is memory_control which I get as parameter in - mo_data_request - <braunr> why would you want to vm_map it then ? - <mcsim> because mo_data_supply doesn't work. - <braunr> mcsim: i still don't see why you want to vm_map - <mcsim> I just want to try it. - <braunr> but what do you think will happen ? - <mcsim> But seems that it doesn't work too, because I can't vm_map - memory_object from memory_manager of this object. - - -## IRC, freenode, #hurd, 2012-01-05 - - <mcsim> Seems tmpfs works now. The code really needs cleaning, but the main - is that it works. So in nearest future it will be ready for merging to - master branch. BTW, anyone knows good tutorial about refactoring using - git (I have a lot of pointless commits and I want to gather and scatter - them to sensible ones). - <antrik> I wonder whether he actually got the "proper" tmpfs with the - defaul pager working? or only the hack with a private pager? - <mcsim> antrik: with default pager - <antrik> mcsim: wow, that's great :-) - <antrik> how did you fix it? - <mcsim> antrik: The main code I wrote before December, so I forgot some of - what exactly I were doing. So I have to look through my code :) - <mcsim> antrik: old defpager was using old functions like m_o_data_write - instead of m_o_data_return etc. I changed it, mostly because of my - misunderstanding. But I hope that this is not a problem. - - -## IRC, freenode, #hurd, 2012-01-18 - - <antrik> mcsim: did you publish your in-progress work? - <mcsim> there is a branch with working tmpfs in git repository: - http://git.savannah.gnu.org/cgit/hurd/hurd.git/log/?h=mplaneta/tmpfs/defpager - <jd823592> sorry for interrupting the meeting but i wonder what is a - lazyfs? 
- <mcsim> jd823592: lazyfs is tmpfs which uses own pager - <antrik> mcsim: ah, nice :-) - <antrik> BTW, what exactly did you need to fix to make it work? - <mcsim> most fixes wore in defpager in default_pager_object_set_size. Also, - as i said earlier, I switched to new functions (m_o_data_return instead - of m_o_data_write and so on). I said that this was mostly because of my - misunderstanding, but it turned out that new function provide work with - precious attribute of page. - <mcsim> Also there were some small errors like this: - <mcsim> pager->map = (dp_map_t) kalloc (PAGEMAP_SIZE (new_size)); - <mcsim> memcpy (pager->map, old_mapptr, PAGEMAP_SIZE (old_size)); - <mcsim> where in second line should be new_size too - <mcsim> I removed all warnings in compiling defpager (and this helped to - find an error). - <antrik> great work :-) - <jd823592> tmpfs is nice thing to have :), are there other recent - improvements that were not yet published in previous moth? - <mcsim> BTW, i measured tmpfs speed in it is up to 6 times faster than - ramdisk+ext2fs - <antrik> mcsim: whow, that's quite a difference... didn't expect that - - -## IRC, freenode, #hurd, 2012-01-24 - - <mcsim> braunr: I'm just wondering is there any messages before hurd - breaks. I have quite strange message: memory_object_data_request(0x0, - 0x0, 0xf000, 0x1000, 0x1) failed, 10000003 - <braunr> hm i don't think so - <braunr> usually it either freezes completely, or it panics because of an - exhausted resource - <mcsim> where first and second 0x0 are pager and pager_request for memory - object in vm_fault_page from gnumach/vm_fault.c - <braunr> if you're using the code you're currently working on (which i - assume), then look for a bug there first - <tschwinge> mcsim: Maybe you're running out of swap? - <mcsim> tschwinge: no - <braunr> also, translate the error code - <mcsim> AFAIR that's MACH_INVALID_DEST - <braunr> and what does it mean in this situation ? - <mcsim> I've run fsx as long as possible several times. It runs quite long - but it breaks in different ways. - <mcsim> MACH_SEND_INVALID_DEST - <mcsim> this means that kernel tries to call rpc with pager 0x0 - <mcsim> this is invalid destiantion - <braunr> null port - <braunr> ok - <braunr> did the pager die ? - <mcsim> When I get this message pager dies, but also computer can suddenly - reboot - <braunr> i guess the pager crashing makes mach print this error - <braunr> but then you may have a dead port instead of a null port, i don't - remember the details - <mcsim> braunr: thank you. - <mcsim> btw, for big file sizes fsx breaks on ext2fs - <braunr> could you identify the threshold ? - <braunr> and what's fsx exactly ? - <mcsim> fsx is a testing utility for filesystems - <mcsim> see http://codemonkey.org.uk/projects/fsx/ - <braunr> ah, written by tevanian - <mcsim> threshold seems to be 8Mb - <braunr> fyi, avadis tevanian is the main author of the mach 3 core - services and VM parts - <braunr> well, ext2fs is bugged, we already know that - <braunr> old code maintained as well as possible, but still - <mcsim> hmm, with 6mb it breaks too - <braunr> i guess that it may break on anything larger than a page actually - :p - <mcsim> When I tested with size of 256kb, fsx worked quite long and didn't - break - <braunr> mcsim: without knowing exactly what the test actually does, it's - hard to tell - <mcsim> I see, I just wanted to tell that there are bugs in ext2fs too. But - I didn't debugged it. 
- <mcsim> fsx performs different operations, like read, write, truncate file, - grow file in random order. - <braunr> in parellel too ? - <braunr> parellel - <braunr> parallel* - <mcsim> no - <mcsim> I run several fsx's parallel on tmpfs, but they break on file with - size 8mb. - <braunr> that must match something in mach - <braunr> s/must/could/ :) - <mcsim> braunr: I've pushed my commits to mplaneta/tmpfs/master branch in - hurd repository, so you could review it. - <braunr> you shouldn't do that just for me :p - <braunr> you should do that regularly, and ask for reviews after - (e.g. during the meetings) - <mcsim> everyone could do that :) - <braunr> i'm quite busy currently unfortunately - <braunr> i'll try when i have time, but the best would be to ask very - specific questions - <braunr> these are usually the faster to answer for people ho have the - necessary expertise to help you - <braunr> fastest* - <mcsim> ok. - <mcsim> braunr: probably, I was doing something wrong, because now parallel - works only for small sizes. Sorry, for disinformation. - - -### IRC, freenode, #hurd, 2012-01-25 - - <antrik> braunr: actually, the paging errors are *precisely* the way my - system tends to die... - <antrik> (it's after about a month of uptime usually though, not a week...) - <antrik> tschwinge: in my case at least, I have still plenty of swap when - this happens. swap usage is generally at about the amount of physical - memory -- I have no idea though whether there is an actual connection, or - it's just coincidence - <braunr> antrik: ok, your hurd dies because of memory issues, my virtual - machines die because of something else (though idk what) - <antrik> before I aquired the habit of running my box 24/7 and thus hitting - this issue, most of the hangs I experienced were also of a different - nature... but very rare in general, except when doing specific - problematic actions - <mcsim> antrik: yes. Do you get messages like that I posted? - <mcsim> here is it: memory_object_data_request(0x0, 0x0, 0xf000, 0x1000, - 0x1) failed, 10000003 - <antrik> mcsim: I can't tell for sure (never noted them down, silly me...) - <antrik> but I definitely get paging errors right before it hangs - <antrik> I guess that was unclear... what I'm trying to say is: I do get - memory_object_data_request() failed; but I'm not sure about the - parameters - <mcsim> antrik: ok. Thank you. - <mcsim> I'll try to find something in defpager, but there should be errors - in mach too. At least because sometimes computer suddenly reboots during - test. - <antrik> mcsim: I don't get sudden reboots - <antrik> might be a different error - <antrik> do you have debugging mode activated in Mach? otherwise it reboots - on kernel panics... - <mcsim> antrik: no. But usually on kernel panics mach waits for some time - showing the error message and only than reboots. - <antrik> OK - <mcsim> how can I know that tmpfs is stable enough? Correcting errors in - kernel to make fsx test work seems to be very complex. - <mcsim> *If errors are in kernel. - <antrik> well, it seems that you tested it already much more thoroughly - than any other code in the Hurd was ever tested :-) - <antrik> of course it would be great if you could pinpoint some of the - problems you see nevertheless :-) - <antrik> but that's not really necessary before declaring tmpfs good enough - I'd say - <mcsim> ok. I'll describe every error I meet on my userpage - <mcsim> but it will take some time, not before weekend. 
- <antrik> don't worry, it's not urgent - <antrik> the reason I'd really love to see those errors investigated is - that most likely they are the same ones that cause stability problems in - actual use... - <antrik> having an easy method for reproducing them is already a good start - <mcsim> no. they are not the same - <mcsim> every time i get different one - <mcsim> especially when i just start one process fsx and wait error - <antrik> mcsim: have you watched memory stats while running it? if it's - related to the problems I'm experiencing, you will probably see rising - memory use while the test is running - <mcsim> it could be reboot, message, I posted and also fsx could stop - telling that something wrong with data - <antrik> you get all of these also on ext2? - <mcsim> i've done it only once. Here is the log: - http://paste.debian.net/153511/ - <mcsim> I saved "free" output every 30 seconds - <mcsim> no. I'll do it now - <antrik> would be better to log with "vmstat 1" - <mcsim> ok. - <mcsim> as you can see, there is now any leek during work. But near end - free memory suddenly decreases - <antrik> yeah... it's a bit odd, as there is a single large drop, but seems - stable again afterwards... - <antrik> a more detailed log might shed some light - <mcsim> drop at the beginning was when I started translator. - <mcsim> what kind of log do you mean? - <antrik> vmstat 1 I mean - <mcsim> ah... - - -## IRC, freenode, #hurd, 2012-02-01 - - <mcsim> I run fsx with this command: fsx -N3000 foo/bar -S4 - -l$((1024*1024*8)). And after 70 commands it breaks. - <mcsim> The strangeness is at address 0xc000 there is text, which was - printed in fsx with vfprintf - <mcsim> I've lost log. Wait a bit, while I generate new - <jkoenig_> mcsim, what's fsx / where can I find it ? - <mcsim> fsx is filesystem exersiser - <mcsim> http://codemonkey.org.uk/projects/fsx/ - <jkoenig_> ok thanks - <mcsim> i use it to test tmpfs - <mcsim> here is fsx that compiles on linux: http://paste.debian.net/154390/ - and Makefile for it: http://paste.debian.net/154392/ - <jkoenig_> mcsim, hmm, I get a failure with ext2fs too, is it expected? - <mcsim> yes - <mcsim> i'll show you logs with tmpfs. They slightly differ - <mcsim> here: http://paste.debian.net/154399/ - <mcsim> pre last operation is truncate - <mcsim> and last is read - <mcsim> during pre-last (or last) starting from address 0xa000, every - 0x1000 bytes appears text - <mcsim> skipping zero size read - <mcsim> skipping zero size read - <mcsim> truncating to largest ever: 0x705f4b - <mcsim> signal 2 - <mcsim> testcalls = 38 - <mcsim> this text is printed by fsx, by function prt - <mcsim> I've mistaken: this text appears even from every beginning - <mcsim> I know that this text appears exactly at this moment, because I - added check of the whole file after every step. And this error appeared - only after last truncation. - <mcsim> I think that the problem is in defpager (I'm fixing it), but I - don't understand where defpager could get this text - <jkoenig_> wow I get java code and debconf templates - <mcsim> So, my question is: is it possible for defpager to get somehow this - text? - <jkoenig_> possibly recycled, non-zeroed pages? - <mcsim> hmmm... probably you're right - <jkoenig_> 0x1000 bytes is consistent with the page size - <mcsim> Should I clean these pages in tmpfs? - <mcsim> or in defpager? - <mcsim> What is proper way? - <jkoenig_> mcsim, I'd say defpager should do it, to avoid leaking - information, I'm not sure though. 
- <jkoenig_> maybe tmpfs should also not assume the pages have been blanked - out. - <mcsim> if i do it in both, it could have big influence on performance. - <mcsim> i'll do it only in defpager so far. - <mcsim> jkoenig_: Thank you a lot - <jkoenig_> mcsim, no problem. - - -## IRC, freenode, #hurd, 2012-02-08 - - <tschwinge> mcsim: You pushed another branch with cleaned-up patches? - <mcsim> yes. - <tschwinge> mcsim: Anyway, any data from your report that we could be - interested in? (Though it's not in English.) - <mcsim> It's completely in ukrainian an and mostly describes some aspects - of hurd's work. - <tschwinge> mcsim: OK. So you ran out of time to do the benchmarking, - etc.? - <tschwinge> Comparing tmpfs to ext2fs with RAM backend, etc., I mean. - <mcsim> tschwinge: I made benchmarking and it turned out that tmpfs up to 6 - times faster than ext2fs - <mcsim> tschwinge: is it possible to have a review of work, I've already - done, even if parallel writing doesn't work? - <tschwinge> mcsim: Do you need this for university or just a general review - for inclusion in the Git master branch? - <mcsim> general review - <tschwinge> Will need to find someone who feels competent to do that... - <mcsim> the branch that should be checked is tmpfs-final - <pinotree> cool, i guess you tested also special types of files like - sockets and pipes? (they are used in eg /run, /var/run or similar) - <mcsim> Oh. I accidentally created this branch. It is my private - branch. I'll delete it now and merge everything to mplaneta/tmpfs/master - <mcsim> pinotree: Completely forgot about them :( I'll do it by all means - <pinotree> mcsim: no worries :) - <mcsim> tschwinge: Ready. The right branch is mplaneta/tmpfs/master - - -## IRC, freenode, #hurd, 2012-03-07 - - <pinotree> did you test it with sockets and pipes? - <mcsim> pinotree: pipes work and sockets seems to work too (I've created - new pfinet device for them and pinged it). - <pinotree> try with simple C apps - <mcsim> Anyway all these are just translators, so there shouldn't be any - problems. - <mcsim> pinotree: works - - -## IRC, freenode, #hurd, 2012-03-22 - - <mcsim> Hello. Is it normal that when i try to run du at directory where - translator is mounted it says that directory is 'Not a directory'? Here - are some examples with different filesystems: paste.debian.net/160699 - First is ramdisk+ext2fs, second is tmpfs, third is ext2fs. - <civodul> i can't reproduce the problem with ext2fs - <civodul> perhaps you can try rpctracing it to see where ENOTDIR comes from - <mcsim> civodul: when I run du io_stat_request ipc is called. But reply is - ((os/kern) invalid address). Where is server code for this ipc? I only - found its definition in defs file and that's all. - <civodul> mcsim: server code is in libdiskfs + ext2fs, for instance - <mcsim> civodul: Does io_stat_request have changed name in server code? I - just can't find it. Here are my grep results fore io_stat_request (i was - grepping in root of hurd repository: paste.debian.net/160708 - <youpi> remove _request - <youpi> it's just io_stat - <mcsim> youpi: thank you - - -## IRC, freenode, #hurd, 2012-04-08 - - <mcsim> youpi: I've corrected everything you said, and pushed code to new - branch mplaneta/tmpfs/master-v2 - <youpi> mcsim: all applied, thanks ! - <youpi> I'll probably test it a bit and upload a new version of hurd - <youpi> mcsim: it seems to be working fine indeed! 
- <mcsim> youpi: thank you for all your reviews, suggestions you gave and - corrections you made :) - <youpi> and it seems translators indeed work there too - <youpi> hopefully it'll work to run the debian installer - <youpi> that'd permit to solve memory consumption - <pinotree> (so tmpfs works really fine now? great!) - <youpi> I could reboot with tmpfs on /tmp and build a package there, yes - <mcsim> youpi: yes, I've compiled several packages already, but it does not - give big advantage in performance. - <youpi> I wasn't really looking for performance, but for correctness :) - <youpi> are you using writeback for your /, actually ? - <youpi> argl, /run gets triggered before mach-defpager is started - <youpi> the X11 socket works there too - <youpi> gnu_srs: might your mouse issue with Xorg be related with vnc usage - too? - <youpi> it seems ENOSPC works fine too - <mcsim> youpi: as to writeback. I think yes, because default pager is asked - to write data only when this data is evicted. - <youpi> I'm talking about kvm - <mcsim> youpi: I use real computer. - <youpi> ok - <youpi> but that indeed means writeback of ext2fs works, which is a good - sign :) diff --git a/hurd/translator/tmpfs/notes_bing.mdwn b/hurd/translator/tmpfs/notes_bing.mdwn deleted file mode 100644 index fa3eeac2..00000000 --- a/hurd/translator/tmpfs/notes_bing.mdwn +++ /dev/null @@ -1,101 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - -1. to run tmpfs as a regular user, /servers/default-pager must be executable by - that user. by default it seems to be set to read/write. - - $ sudo chmod ugo+x /servers/default-pager - - Then I get this error: - - tmpfs: /build/mbanck/hurd-20060825/build-tree/hurd/tmpfs/dir.c:62: diskfs_get_directs: Assertion `__builtin_offsetof (struct tmpfs_dirent, name) >= __builtin_offsetof (struct dirent, d_name)' failed. - -2. i rearranged the struct tempfs_dirent in tmpfs.h to line up with the struct - dirent. now the assert passes at line 62 of dir.c passes. - - struct tmpfs_dirent - { - struct tmpfs_dirent *next; - struct disknode *dn; - + char padding[3]; - uint8_t namelen; - char name[0]; - }; - - now ls works on an empty directory. - you can touch files, and run `ls' on them. - mkdir, rmdir works too. - fsysopts works - df works - -3. creating a symlink fails. 
- - old patch to get symlinks working: - - http://www.mail-archive.com/bug-hurd@gnu.org/msg11844.html - --- node.c.orig 2005-07-24 09:56:39.000000000 -0400 - +++ node.c 2005-07-24 09:55:46.000000000 -0400 - @@ -330,6 +330,7 @@ - create_symlink_hook (struct node *np, const char *target) - { - assert (np->dn->u.lnk == 0); - + np->dn_stat.st_size = strlen (target); - if (np->dn_stat.st_size > 0) - { - const size_t size = np->dn_stat.st_size + 1; - @@ -337,6 +338,7 @@ - if (np->dn->u.lnk == 0) - return ENOSPC; - memcpy (np->dn->u.lnk, target, size); - + np->dn->type = DT_LNK; - adjust_used (size); - recompute_blocks (np); - } - @@ -380,8 +382,6 @@ - error_t - diskfs_truncate (struct node *np, off_t size) - { - - if (np->allocsize <= size) - - return 0; - - if (np->dn->type == DT_LNK) - { - @@ -392,6 +392,9 @@ - return 0; - } - - + if (np->allocsize <= size) - + return 0; - + - assert (np->dn->type == DT_REG); - - if (default_pager == MACH_PORT_NULL) - - now symlinks work. - -4. can't write data to a file - - ---- - -miscellaneous notes: - -`diskfs_disk_name` could be `NULL`, but it is `"swap"` - -using `default_pager_object_set_size (np->dn->u.reg.memobj, size);` to truncate and grow. - -why are our blocks 512? shouldn't it something else? or at least settable? -or does [[libdiskfs]] demand this? - -`diskfs_get_filemap_pager_struct (struct node *np)` returns null. -shouldn't it return `default_pager`? diff --git a/hurd/translator/tmpfs/notes_various.mdwn b/hurd/translator/tmpfs/notes_various.mdwn deleted file mode 100644 index d1c5cf62..00000000 --- a/hurd/translator/tmpfs/notes_various.mdwn +++ /dev/null @@ -1,222 +0,0 @@ -[[!meta copyright="Copyright © 2005, 2006, 2007, 2008, 2009, 2011 Free Software -Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_hurd]] - - <antrik> hde: what's the status on tmpfs? - <hde> Broke - <hde> k0ro traced the errors like the assert show above to a pager problem. - See the pager cannot handle request from multiple ports and tmpfs sends - request using two differ ports, so to fix it the pager needs to be hacked - to support multiple requests. - <hde> You can enable debugging in the pager by changing a line from dprintf - to ddprintf I can tell you how if you want. - <antrik> and changing tmpfs to use a single port isn't possible?... - <hde> antrik, I am not sure. - <hde> IIRC k0ro was saying it cannot be changed and I cannot recall his - reasons why. - <sdschulze> antrik: Doing it the quick&dirty way, I'd just use an N-ary - tree for representing the directory structure and mmap one new page (or - more) for each file. - <hde> sdschulze, What are you talking about? - <sdschulze> hde: about how I would implement tmpfs - <hde> O - <azeem> sdschulze: you don't need to reimplement it, just fix it :) - <sdschulze> azeem: Well, it seems a bit more difficult than I considered. - <sdschulze> I had assumed it was implemented the way I described. - <hde> O and the assert above gets triggered if you don't have a - default-pager setup on /servers/default-pager - <hde> the dir.c:62 assert that is. 
- <azeem> hde: you sure? I think I have one - <hde> I am almost sure. - <azeem> mbanck@beethoven:~$ showtrans /servers/default-pager - <azeem> /hurd/proxy-defpager - <azeem> isn't that enough? - <hde> It is suppose to be. - <hde> Try it as root - <hde> I was experiecing alot of bugs as a normal user, but according to - marcus it is suppose to work as root, but I was getting alot of hangs. - <azeem> hde: same issue, sudo doesn't work - <hde> sucky, well then there are alot of bugs. =) - <azeem> eh, no - <azeem> I still get the dir.c assert - <sdschulze> me too - <sdschulze> Without it, I already get an error message trying to set tmpfs - as an active translator. - ---- - - <hde> I think I found the colprit. - <hde> default_pager_object_set_size --> This is were tmpfs is hanging. - <hde> mmm Hangs on the message to the default-pager. - ---- - - <hde> Well it looks like tmpfs is sending a message to the default-pager, - the default-pager then receives the message and, checks the seqno. I - checked the mig gen code and noticed that the seqno is the reply port, it - this does not check out then the default pager is put into a what it - seems infinte condition_wait hoping to get the correct seqno. - <hde> Now I am figuring out how to fix it, and debugging some more. - ---- - - <marco_g> hde: Still working on tmpfs? - <hde> Yea - <marco_g> Did you fix a lot already? - <hde> No, just trying to narrow down the reason why we cannot write file - greater then 4.5K. - <marco_g> ahh - <marco_g> What did you figure out so far? - <hde> I used the quick marcus fix for the reading assert. - <marco_g> reading assert? - <hde> Yea you know ls asserted. - <marco_g> oh? :) - <hde> Because, the offsets changed in sturct dirent in libc. - <hde> They added 64 bit checks. - <hde> So marcus suggested a while ago on bug-hurd to just add some padding - arrays to the struct tmpfs_dirent. - <hde> And low and behold it works. - <marco_g> Oh, that fix. - <hde> Yup - <hde> marco_g, I have figured out that tmpfs sends a message to the - default-pager, the default-pager does receive the message, but then - checks the seqno(The reply port) and if it is not the same as the - default-pagers structure->seqno then she waits hoping to get the correct - one. Unfortantly it puts the pager into a infinite lock and never come - out of it. - <marco_g> hde: That sucks... - <marco_g> But at least you know what the problem is. - <hde> marco_g, Yea, now I am figuring out how to fix it. - <hde> Which requires more debugging lol. - <hde> There is also another bug, default_pager_object_set_size in - <hde> mach-defpager does never return when called and makes tmpfs hang. I - <hde> will have a closer look at this later this week. - ---- - - <hde> Cool, now that I have two pagers running, hopefully I will have less - system crashes. - <marcus> running more than one pager sounds like trouble to me, but maybe - hde means something different than I think - <hde> Well the other pager is only for tmpfs to use. - <hde> So I can debug the pager without messing with the entire system. - <hde> marcus, I am trying ti figure out why diskfs_object_set_size waits - forever. This way when the pager becomes locked forever I can turn it - off and restart it. When I was doing this with only one mach-defpager - running the system would crash. - <marcus> hde: how were you able to start two default pagers?? - <hde> Well you most likely will not think my way of doing it was correct, - and I am also not sure if it is lol. 
I made my hacked version not stop - working if one is alreay started. - ---- - - <hde> See, the default-pager has a function called - default_pager_object_set_size this sets the size for a memory object, - well it checks the seqno for each object if it is wrong it goes into a - condition_wait, and waits for another thread to give it a correct seqno, - well this never happens. - <hde> Thus, you get a hung tmpfs and default-pager. - <hde> pager_memcpy (pager=0x0, memobj=33, offset=4096, other=0x20740, - size=0x129df54, prot=3) at pager-memcpy.c:43 - <hde> bddebian, See the problem? - <bddebian> pager=0x0? - <hde> Yup - <hde> Now wtf is the deal, I must debug. - <hde> -- Function: struct pager * diskfs_get_filemap_pager_struct - <hde> (struct node *NP) - <hde> Return a `struct pager *' that refers to the pager returned by - <hde> diskfs_get_filemap for locked node NP, suitable for use as an - <hde> argument to `pager_memcpy'. - <hde> That is failing. - <hde> If it is not one thing it is another. - <bddebian> All of Mach fails ;-) - <hde> It is alot of work to make a test program that uses libdiskfs. - ---- - - <bing> to run tmpfs as a regular user, /servers/default-pager must be - executable by that user. by default it seems to be set to read/write. - <bing> $ sudo chmod ugo+x /servers/default-pager - <bing> you can see the O_EXEC in tmpfs.c - <bing> maybe this is just a debian packaging problem - <bing> it's probably a fix to native-install i'd guess - ---- - - <bing> tmpfs is failing on default_pager_object_create with -308, which - means server died - <bing> i'm running it as a regular user, so it gets it's pager from - /servers/default-pager - <bing> and showtrans /servers/default-pager shows /hurd/proxy-defpager - <bing> so i'm guessing that's the server that died - ---- - - <bing> this is about /hurd/tmpfs - <bing> a filesystem in memory - <bing> such that each file is it's own memory object - <andar> what does that mean exactly? it differs from a "ramdisk"? - <bing> instead of the whole fs being a memory object - <andar> it only allocates memory as needed? - <bing> each file is it's own - <bing> andar: yeah - <bing> it's not ext2 or anything - <andar> yea - <bing> it's tmpfs :-) - <bing> first off, echo "this" > that - <bing> fails - <bing> with a hang - <bing> on default_pager_object_create - <andar> so writing to the memory object fails - <bing> well, it's on the create - <andar> ah - <bing> and it returns -308 - <bing> which is server died - <bing> in mig-speak - <bing> but if i run it as root - <bing> things behave differently - <bing> it gets passed the create - <bing> but then i don't know what - <bing> i want to make it work for the regular user - <bing> it doesn't work as root either, it hangs elsewhere - <andar> but it at least creates the memory object - <bing> that's the braindump - <bing> but it's great for symlinks! - <andar> do you know if it creates it? - <bing> i could do stowfs in it - ---- - - <antrik> bing: k0ro (I think) analized the tmpfs problem some two years ago - or so, remember?... - <antrik> it turns out that it broke due to some change in other stuff - (glibc I think) - <antrik> problem was something like getting RPCs to same port from two - different sources or so - <antrik> and the fix to that is non-trivial - <antrik> I don't remember in what situations it broke exactly, maybe when - writing larger files? 
- <bing> antrik: yeah i never understood the explanation - <bing> antrik: right now it doesn't write any files - <bing> the change in glibc was to struct dirent - <antrik> seems something more broke in the meantime :-( - <antrik> ah, right... but I the main problem was some other change - <antrik> (or maybe it never really worked, not sure anymore) - ---- - -IRC, freenode, #hurd, 2011-10-11: - - <mcsim> There is no patch for "tmpfs crashes on filling an empty file". For - second bug there is Zheng Da's patch, but it wasn't applied (at least I - didn't found). diff --git a/hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn b/hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn deleted file mode 100644 index 5228515f..00000000 --- a/hurd/translator/tmpfs/tmpfs_vs_defpager.mdwn +++ /dev/null @@ -1,272 +0,0 @@ -[[!meta copyright="Copyright © 2010, 2011 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled [[GNU Free Documentation -License|/fdl]]."]]"""]] - -[[!tag open_issue_gnumach open_issue_hurd]] - - -# IRC, freenode, #hurd, 2010 - - <slpz> humm... why does tmpfs try to use the default pager? that's a bad - idea, and probably will never work correctly... - * slpz is thinking about old issues - <slpz> tmpfs should create its own pagers, just like ext2fs, storeio... - <slpz> slopez@slp-hurd:~$ settrans -a tmp /hurd/tmpfs 10M - <slpz> slopez@slp-hurd:~$ echo "foo" > tmp/bar - <slpz> slopez@slp-hurd:~$ cat tmp/bar - <slpz> foo - <slpz> slopez@slp-hurd:~$ - <slpz> :-) - <pochu> slpz: woo you fixed it? - <slpz> pochu: well, it's WIP, but reading/writing works... - <slpz> I've replaced the use of default pager for the standard pager - creation mechanism - <antrik> slpz: err... how is it supposed to use swap space if not using the - default pager? - <antrik> slpz: or do you mean that it should act as a proxy, just - allocating anonymous memory (backed by the default pager) itself? - <youpi> antrik: the kernel uses the default pager if the application pager - isn't responsive enough - <slpz> antrik: it will just create memory objects and provide zerofilled - pages when requested by the kernel (after a page fault) - <antrik> youpi: that makes sense I guess... but how is that relevant to the - question at hand?... - <slpz> antrik: memory objects will contain the data by themselves - <slpz> antrik: as youpi said, when memory is scarce, GNU Mach will start - paging out data from memory objects to the default pager - <slpz> antrik: that's the way in which pages will get into swap space - <slpz> (if needed) - <youpi> the thing being that the tmpfs pager has a chance to select pages - he doesn't care any more about - <antrik> slpz: well, the point is that instead of writing the pages to a - backing store, tmpfs will just keep them in anonymous memory, and let the - default pager write them out when there is pressure, right? - <antrik> youpi: no idea what you are talking about. apparently I still - don't really understand this stuff :-( - <youpi> ah, but tmpfs doesn't have pages he doesn't care about, does it? - <slpz> antrik: yes, but the term "anonymous memory" could be a bit - confusing. 
- <slpz> antrik: in GNU Mach, anonymous memory is backed by a memory object - without a pager. In tmpfs, nodes will be allocated in memory objects, and - the pager for those memory objects will be tmpfs itself - <antrik> slpz: hm... I thought anynymous memory is backed by memory objects - created from the default pager? - <antrik> yes, I understand that tmpfs is supposed to be the pager for the - objects it provides. they are obviously not anonymoust -- they have - inodes in the tmpfs name space - <antrik> but my understanding so far was that when Mach returns pages to - the pager, they end up in anonymous memory allocated to the pager - process; and then this pager is responsible for writing them back to the - actual backing store - <antrik> am I totally off there?... - <antrik> (i.e. in my understanding the returned pages do not reside in the - actual memory object the pager provides, but in an anonymous memory - object) - <slpz> antrik: you're right. The trick here is, when does Mach return the - pages? - <slpz> antrik: if we set the attribute "can_persist" in a memory object, - Mach will keep it until object cache is full or memory is scarce - <slpz> or we change the attributes so it can no longer persist, of course - <slpz> without a backing store, if Mach starts sending us pages to be - written, we're in trouble - <slpz> so we must do something about it. One option, could be creating - another pager and copying the contents between objects. - <antrik> another pager? not sure what you mean - <antrik> BTW, you didn't really say why we can't use the default pager for - tmpfs objects :-) - <slpz> well, there're two problems when using the default pager as backing - store for translators - <slpz> 1) Mach relies on it to do swapping tasks, so meddling with it is - not a good idea - <slpz> 2) There're problems with seqnos when trying to work with the - default pager from tasks other the kernel itself - <slpz> (probably, the latter could be fixed) - <slpz> antrik: pager's terminology is a bit confusing. One can also say - creating another memory object (though the function in libpager is - "pager_create") - <antrik> not sure why "meddling" with it would be a problem... - <antrik> and yeah, I was vaguely aware that there is some seqno problem - with tmpfs... though so far I didn't really understand what it was about - :-) - <antrik> makes sense now - <antrik> anyways, AIUI now you are trying to come up with a mechanism where - the default pager is not used for tmpfs objects directly, but without - making it inefficient? - <antrik> slpz: still don't understand what you mean by creating another - memory object/pager... - <antrik> (and yeat, the terminology is pretty mixed up even in Mach itself) - <slpz> antrik: I meant creating another pager, in terms of calling again to - libpager's pager_create - <antrik> slpz: well, I understand what "create another pager" means... I - just don't understand what this other pager would be, when you would - create it, and what for... 
- <slpz> antrik: oh, ok, sorry - <slpz> antrik: creating another pager it's just a trick to avoid losing - information when Mach's objects cache is full, and it decides to purge - one of our objects - <slpz> anyway, IMHO object caching mechanism is obsolete and should be - replaced - <slpz> I'm writting a comment to bug #28730 which says something about this - <slpz> antrik: just one more thing :-) - <slpz> if you look at the code, for most time of their lives, anonymous - memory objects don't have a pager - <slpz> not even the default one - <slpz> only the pageout thread, when the system is running really low on - memory, gives them a reference to the default pager by calling - vm_object_pager_create - <slpz> this is not really important, but worth noting ;-) - - -# IRC, freenode, #hurd, 2011-09-28 - - <slpz> mcsim: "Fix tmpfs" task should be called "Fix default pager" :-) - <slpz> mcsim: I've been thinking about modifying tmpfs to actually have - it's own storeio based backend, even if a tmpfs with storage sounds a bit - stupid. - <slpz> mcsim: but I don't like the idea of having translators messing up - with the default pager... - <antrik> slpz: messing up?... - <slpz> antrik: in the sense of creating a number of arbitrarily sized - objects - <antrik> slpz: well, it doesn't really matter much whether a process - indirectly eats up arbitrary amounts of swap through tmpfs, or directly - through vm_allocate()... - <antrik> though admittedly it's harder to implement resource limits with - tmpfs - <slpz> antrik: but I've talked about having its own storeio device as - backend. This way Mach can pageout memory to tmpfs if it's needed. - <mcsim> Do I understand correctly that the goal of tmpfs task is to create - tmpfs in RAM? - <slpz> mcsim: It is. But it also needs some kind of backend, just in case - it's ordered to page out data to free some system's memory. - <slpz> mcsim: Nowadays, this backend is another translator that acts as - default pager for the whole system - <antrik> slpz: pageout memory to tmpfs? not sure what you mean - <slpz> antrik: I mean tmpfs acting as its own pager - <antrik> slpz: you mean tmpfs not using the swap partition, but some other - backing store? - <slpz> antrik: Yes. - -See also: [[open_issues/resource_management_problems/pagers]]. - - <antrik> slpz: I don't think an extra backing store for tmpfs is a good - idea. the whole point of tmpfs is not having a backing store... TBH, I'd - even like to see a single backing store for anonymous memory and named - files - <slpz> antrik: But you need a backing store, even if it's the default pager - :-) - <slpz> antrik: The question is, Should users share the same backing store - (swap space) or provide their own? - <antrik> slpz: not sure what you mean by "users" in this context :-) - <slpz> antrik: Real users with the ability of setting tmpfs translators - <antrik> essentially, I'd like to have a single partition that contains - both swap space and the main filesystem (at least /tmp, but probably also - all of /run, and possibly even /home...) - <antrik> but that's a bit off-topic :-) - <antrik> well, ideally all storage should be accounted to a user, - regardless whether it's swapped out anonymous storage, temporary named - files, or permanent files - <slpz> antrik: you could use a file as backend for tmpfs - <antrik> slpz: what's the point of using tmpfs then? 
:-) - <pinotree> (and then store the file in another tmpfs) - <slpz> antrik: mach-defpager could be modified to use storeio instead of - Mach's device_* operations, but by the way things work right now, that - could be dangerous, IMHO - <antrik> pinotree: hehe - <pinotree> .. recursive tmpfs'es ;) - <antrik> slpz: hm, sounds interesting - <slpz> antrik: tmpfs would try to keep data in memory always it's possible - (not calling m_o_lock_request would do the trick), but if memory is - scarce an Mach starts paging out, it would write it to that - file/device/whatever - <antrik> ideally, all storage used by system tasks for swapped out - anonymous memory as well as temporary named files would end up on the - /run partition; while all storage used by users would end up in /home/* - <antrik> if users share a partition, some explicit storage accounting would - be useful too... - <antrik> slpz: is that any different from what "normal" filesystems do?... - <antrik> (and *should* it be different?...) - <slpz> antrik: Yes, as most FS try to synchronize to disk at a reasonable - rate, to prevent data losses. - <slpz> antrik: tmpfs would be a FS that wouldn't synchronize until it's - forced to do that (which, by the way, it's what's currently happening - with everyone that uses the default pager). - <antrik> slpz: hm, good point... - <slpz> antrik: Also, metadata in never written to disk, only kept in memory - (which saves a lot of I/O, too). - <slpz> antrik: In fact, we would be doing the same as every other kernel - does, but doing it explicitly :-) - <antrik> I see the use in separating precious data (in permanent named - files) from temporary state (anonymous memory and temporary named files) - -- but I'm not sure whether having a completely separate FS for the - temporary data is the right approach for that... - <slpz> antrik: And giving the user the option to specify its own storage, - so we don't limit him to the size established for swap by the super-user. - <antrik> either way, that would be a rather radical change... still would - be good to fix tmpfs as it is first if possible - <antrik> as for limited swap, that's precisely why I'd prefer not to have - an extra swap partition at all... - <slpz> antrik: It's not much o fa change, it's how it works right now, with - the exception of replacing the default pager with its own. - <slpz> antrik: I think it's just a matter of 10-20 hours, as - much. Including testing. - <slpz> antrik: It could be forked with another name, though :-) - <antrik> slpz: I don't mean radical change in the implementation... but a - radical change in the way it would be used - <slpz> antrik: I suggest "almosttmpfs" as the name for the forked one :-P - <antrik> hehe - <antrik> how about lazyfs? - <slpz> antrik: That sound good to me, but probably we should use a more - descriptive name :-) - - -## 2011-09-29 - - <tschwinge> slpz, antrik: There is a defpager in the Hurd code. It is not - currently being used, and likely incomplete. It is backed by libstore. - I have never looked at it. - -[[open_issues/mach-defpager_vs_defpager]]. - - -# IRC, freenode, #hurd, 2011-11-08 - - <mcsim> who else uses defpager besides tmpfs and kernel? - <braunr> normally, nothing directly - <mcsim> than why tmpfs should use defpager? 
- <braunr> it's its backend - <braunr> backign store rather - <braunr> the backing store of most file systems are partitions - <braunr> tmpfs has none, it uses the swap space - <mcsim> if we allocate memory for tmpfs using vm_allocate, will it be able - to use swap partition? - <braunr> it should - <braunr> vm_allocate just maps anonymous memory - <braunr> anonymous memory uses swap space as its backing store too - <braunr> but be aware that this part of the vm system is known to have - deficiencies - <braunr> which is why all mach based implementations have rewritten their - default pager - <mcsim> what kind of deficiencies? - <braunr> bugs - <braunr> and design issues, making anonymous memory fragmentation horrible - <antrik> mcsim: vm_allocate doesn't return a memory object; so it can't be - passed to clients for mmap() - <mcsim> antrik: I use vm_allocate in pager_read_page - <antrik> mcsim: well, that means that you have to actually implement a - pager yourself - <antrik> also, when the kernel asks the pager to write back some pages, it - expects the memory to become free. if you are "paging" to ordinary - anonymous memory, this doesn't happen; so I expect it to have a very bad - effect on system performance - <antrik> both can be avoided by just passing a real anonymous memory - object, i.e. one provided by the defpager - <antrik> only problem is that the current defpager implementation can't - really handle that... - <antrik> at least that's my understanding of the situation diff --git a/hurd/translator/unionfs.mdwn b/hurd/translator/unionfs.mdwn deleted file mode 100644 index 2b692cf9..00000000 --- a/hurd/translator/unionfs.mdwn +++ /dev/null @@ -1,160 +0,0 @@ -[[!meta copyright="Copyright © 2008, 2009, 2010 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -# `unionfs` - -*Unionfs allows you to simply union one directory or translator into another one, so you see the files of both of them side by side.* - -Source repository: <http://git.savannah.gnu.org/cgit/hurd/unionfs.git/> - -Right now there are some problems with syncing, so please be aware -that it might not work as expected. - -<a name="unionmount"></a> -# `unionmount` - -... is a special mode of `unionfs`. - -## Project Idea - -When setting a translator on Hurd -- similar to mounting a file system on UNIX --- the new node(s) exported by the translator are obscuring the original node -where the translator is set, and any nodes below it in the directory tree. The -translator itself can access the underlying node (which is a very nice feature, -as it allows translators presenting the contents of the node in a different -format); but it's no longer accessible from the "outside". - -Plan9 has a feature where a file system can be mounted in union mode: the new -file system doesn't obscure the mount point in this case, but instead the -contents are combined. (This feature has also been under discussion in Linux -for a couple of years now, under the label "VFS-based union mounts".) 
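For a concrete feel of the difference, here is a minimal sketch using the same `<node>`/`<translator>` placeholders as the *Short Documentation* section further down this page; the `--mount` option it relies on is the one described there:

    # ordinary translator setting: the nodes exported by <translator>
    # obscure whatever was previously visible at <node>
    $ settrans -a <node> <translator>

    # union mount: unionfs acts as a proxy, so the nodes exported by
    # <translator> and the original contents of <node> stay visible together
    $ settrans -a <node> unionfs --underlying --mount=<translator>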
- -This kind of union mounts is generally useful, as it's sometimes more -convenient than unioning existing filesystem locations with unionfs -- it's not -necessary to mount a file system that is to be unioned at some external -location first: just union-mount it directly at the target location. - -But union mounts also allow creating passive translator hierarchies: If there -is a passive translator on a parent node, and further passive translators on -child nodes, the union mount allows the child nodes with the further translator -settings still to be visible after the parent translator has started. - -This could be useful for device nodes for example: let's say we have an -ethernet multiplexer at /dev/veth. Now the virtual subnodes could all be -directly under /dev, i.e. /dev/veth0, /dev/veth1 etc., and explicitely refer to -the main /dev/veth node in the translator command line. It would be more -elegant however to store the virtual nodes direcly below the main multiplexer -node -- /dev/veth/0, /dev/veth/1 etc. - -There are two possible approaches how union mounts could be implemented in the -Hurd. The first one is to let the various translators handle union mounts -internally, i.e. let them present the underlying nodes to the clients in -addition to the actual nodes they export themselfs. This probably can be -implemented as some kind of extension to the existing netfs and diskfs -libraries. - -The other possible apporach is less efficient and probably more tricky, but -probably also more generic: create a special unionmount translator, which -serves as a kind of proxy: setting the union-mounted translator on some -internal node; and at the actual mount location, presenting a union of the -nodes exported by this translator, and the nodes from the underlying file -system. - -The goal of this project is implementing union mounts using either of the -approaches described above. (Though it might be useful initially to prototype -both for comparision.) The ethernet multiplexer shall serve as an example use -case -- any changes necessary to allow using it with the union mount -functionality are also to be considered part of the task. - -[[Sergiu Ivanov|scolobb]] has been working on this as a [[Google Summer of Code -2009 project|community/gsoc/2009]]. - -## Implementation - -### Source - -Union mounts are currently implemented as two additional command line -options of the `unionfs` translator. This implementation resides in -the master-unionmount branch of the unionfs git repository. To -checkout the code, do the following: - - $ git clone git://git.sv.gnu.org/hurd/unionfs.git - $ cd unionfs - $ git checkout -b master-unionmount - $ git pull origin master-unionmount - -You can skip the checkout step if you don't mind that the -`master-unionmount` branch gets merged into the `master` branch. - -### Short Documentation - -The `unionmount` project adds options "--mount" and "--no-mount" to -`unionfs` (short versions: "-t" and "-n" correspondingly). Both -options are used to implement union-mounting, but the first option -will create a *transparent* union mount, while the second option will -create a *nontransparent* union mount. - -One can create a transparent union mount with the following command: - - $ settrans -a <node> unionfs --underlying --mount=<translator> - -When running - - $ fsysopts <node> - -one will see the information about the `<translator>`, not the -`unionfs` translator. 
Although this might seem the only natural way -to do union mounts, one must keep in mind that such transparency -deprives one of the possibility to modify the unioned virtual -filesystem exported by `unionfs` at run-time (via `fsysopts`). - -One can create a nontransparent union mount with the following command: - - $ settrans -a <node> unionfs --underlying --no-mount=<translator> - -When running - - $ fsysopts <node> - -one will see the information about the `unionfs` translator. Although -this way allows modifying the contents of the unioned filesystem -exported by `unionfs` at runtime, the access to `<translator>` is -blocked. - -The filesystem exported by the *mountee* (`<translator>`) is actually -treated like a normal filesystem within `unionfs`, which means that -one can assign priorities to the *mountee* to achieve the desired -order of layering of the unioned directories. The following will make -`unionfs` query the underlying filesystem first and then the -*mountee*: - - $ settrans -a <node> unionfs --priority=2 --underlying --priority=1 --mount=<translator> - -Note that the same functionality can also be achieved by assigning -priority 1 to the underlying filesystem and keeping the priority of -the *mountee* at 0. - -<a name="stowfs"></a> -# `stowfs` - -... is a special mode of `unionfs`. - -# External Links - - * [*Unioning file systems for Linux*](http://valerieaurora.org/union/) - - * [FUSE page about - `unionfs`](http://sourceforge.net/apps/mediawiki/fuse/index.php?title=UnionFileSystems) - - * [Linux' overlay file system proposal, - 2010-09-20](http://thread.gmane.org/gmane.linux.kernel/1038413) - - How is this different? diff --git a/hurd/translator/unionmount.mdwn b/hurd/translator/unionmount.mdwn deleted file mode 100644 index 7384afc7..00000000 --- a/hurd/translator/unionmount.mdwn +++ /dev/null @@ -1,11 +0,0 @@ -[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -[[!meta redir=unionfs#unionmount]] diff --git a/hurd/translator/wishlist_1.mdwn b/hurd/translator/wishlist_1.mdwn deleted file mode 100644 index 36290883..00000000 --- a/hurd/translator/wishlist_1.mdwn +++ /dev/null @@ -1,129 +0,0 @@ -[[!meta copyright="Copyright © 2007, 2008, 2009 Free Software Foundation, -Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -* [[devfs]] - -* FUSE(fuse.sourceforge.net/) compatilbility library. : just modify FUSE apps a and compile little to work as translator :-) - -* File Finder. 
(uses find, grep, a combination or a custom command (htdig, mp3 info) - * Files found will be available under one directory and then can be used like a normal directory - * usefull to generate Albums, Share only resulting files over the et, etc.. - * The filefinder can be scheduled or can be connected over some ipc like dbus to the VFS system if any to keep a watch for new files. - -* GNOKII, BitPim and openobex as translators - * grep through your SMSs! - * share your addressbook! - * "Attach" that funny SMS/MMS to your email. - * "svn commit" your joke collection :-D - -* Real Language Translator [[br]] - * cat /usr/translator/de-en/usr/share/doc/something.txt should take /usr/share/doc/something.txt , submit it to google's website and bring back results. - -* Mozilla Bookmarks = del.ici.ous - * Need more explanation ? ;-) - -* <http://hnb.sf.net> - * having a directory structure for a file can allow to "svn commit" the hnb document in a more "node-safe" manner, thus allowing multiple people to work on the same hnb document. - * This must be fairly easy as HNB can already export to XML and XMLfs already exists. - -* DavFS - * Just setup a 'WebDav share' as a directory. The implimentation of the protocol is already available in nautilus and konqueror. - -* Compiled form of your project - * you have your project in /somedir/project with source in /somedir/project/src .. /somedir/project/bin should always have the compiled version.. is it possible? - * The source has to have a MakeFile. - * creating /somedir/project/bin-somearch should aotomatically crosscompile - * Seems feasible for a small project. - -* Report generation FrameWork - an idea to be hugged by app developers..not kernel developers. - * You have financial data in some Spreadsheet like format in /yourFinance directory - * You add report templates to /yourFinance/repTemplates - * Once you save data in /yourFinance the next cat /yourFinance/reports/areportname will give you an uptodate report. - * This will be usefull for any purpose including serving by static page web servers, sharing over samba/nfs/ftp etc.! - * The advantage is any save to the spreadsheet will update the report.. not just from one software. - -* SVN (Subversion suite) - * like [[cvsfs]]. /svndir/version/version-no should automatically have subversion - * I think it is nice to write a generalised version control system framework library which will help in writing version control translators and other tools easily. - -* Flexi-acls - * First of all - Can this be done? : A translator bound to a directory must be able to access the contents of the directory which would have been accessible in the absence of the translator.. - * This will enable to wirte translators that can implement custom "Access Control Lists". Just imagine having advanced ACLs even if underlying FileSystem is dumb! Imagine changing the type of ACLs implemented with Just two commands - one to unattach previous translator and the next to attach a different ACL translator! The ACLs themselves may be stored in a different directory - -* The translator manager! - * Some translators will need to be inserted automatically - like for eg: hardware drivers .. - * Each hardware translator will pubish its capabilities. - * The "top" translator will query the capabilities of different hardware and match capabilities with the "slave" translators. That is it's only Job. - * The control is then handed over to the slave translator. 
- * The ranking not only looks who can handle the "most" capabilites of the hardware. If it finds that multiple translators can handle the same hardware, It will use other parameters to choose between them.. like may be the size in memory? The revision date? Stability (inferred from version number)? And to go to the extreme, the profiling data of the driver ;-P - * Advantage : The best driver wins! - -* An eg. Skip it if you understood the above :-): - * You have a driver that can handle VGA + SVGA + Super3d acceleration + Particle graphics + Works for nvidea card. - * You have a driver that can handle SVGA + VGA . - * You have a driver that can handle VGA. - * Case 1: Your card: A VGA card with some extra fonts.. - * First the VGA driver will be quireied .. ok can handle essential capability. - * Next SVGA driver: can handle but has extra module.. unnecassary weight . - * The Nvidia driver: can handle , but again unnecassary weight : ruled out. - * Winner : VGA driver: - * Case 2: Your card An SVGA card: - * First the VGA driver will be quireied .. ok can handle one essential capability. - * Next SVGA driver: can handle essential and one extra capability no extra weight.. - * The Nvidia driver: can handle , but unnecassary weight : ruled out. - * Winner : SVGA driver.. - * Case 3 : You have an VGA .. but no VGA driver .. then the SVGA driver would win. - -* Sound Server - * /ahsa - stands for Advanced HURD sound architecture :-) Just a temporary name .. for fun. - * /ahsa/out - directory wich will hold "plug"s where apllications come and join the server .. see below. - * /ahsa/out/mixer - main mixer - * /ahsa/out/nextfree - the file when "cat"ed gives the number of the next free plug - * /ahsa/plugins/ - info about available plugins - * /ahsa/out/[1..n]/ - dynamically generated directories for applications to plug on.. - * /ahsa/out/[1..n]/data this is where you should do a "cat somerawsoundfile>/ahsa/out/`cat /ahsa/nextfree`/data" - * /ahsa/out/[1..n]/plugins - the plugin stack .. volume is also a plugin.. - * /ahsa/out/[1..n]/plugins/[1..m]/ - echo "plugin param1 param2 param3" > /ahsa/out/[1..n]/plugins/`cat /ahsa/out/[1..n]/plugins/nextfree`/add - * /ahsa/out/[1..n]/plugins/[1..m]/params/{param1.. paramn} - * /ahsa/out/[1..n]/data.out - can be catted to get data processed through the server - * /ahsa/in - similar to /ahsa/out .. with except for an extra file to choose input devices. - * /ahsa/devs/{1..n} - devices detected .. can be dynamic .. there are usb soundcards and and midi devices. - * /ahsa/out/[1..n]/plugins/[1..m]/0/params/dev - * Dont get tempted for :/ahsa/out/[1..n]/params/{rate, channels, and other stuff} - * that goes into /ahsa/out/[1..n]/plugins/0/params if /ahsa/out/[1..n]/plugins/0/detected == "headerless audio" - * There are a lot more things I can continue about the "sound server" .. The Ideas simply dont seem to exhaust.. - * Some features/advantages - * set output's translator plugin as ordinary text -- have text to speech conversion done by sound server! - * Create and apply plugin presets by simply copying directories! - * Me getting dizzy thinking of the zillion more advantages. - * If you are really doing some ordinary output , all you need to do is "cat" data into next free "plug" and everything will be autodetected including the format of the data and sent to the final sound "merge"r - * Dizzy ... - -* /usr/share/menu !!!! extension for package management idea .. - * cat mymenuitem.menu >> /usr/share/menu/menu - * cat /usr/share/menu/debian/kde ... 
:-) - -* Spam/Malware Control - * /usr/antimalware/ - put your mail here.. it will automatically be scanned. when finished it will vanish from here .. - * /usr/antimalware/clean - ... and pop out from here - * /usr/antimalware/malware - or here. - -* NetDevice - * !JustImagine(tm)... settrans -ac /netdevices /hurd/netdevfs - [ host | net ] - * One can access device files remotely - * This could be acheived by allowing translators talk to one another over a network - * This will need translators to catch and handle ioctls (if there is such a thing in HURD). - * The device server which will listen to requests from the translators can be run even on a Linux machine!!! - * !JustImagine(tm)... accessing the crwriter/webcam on that GNU/Linux machine on the network using cdrecord of your local hurd machine! - * !JustImagine(tm)... running GNU/HURD on a minimalistic GNU/Linux(but with all the drivers) through a specially modified and optimised Qemu. The device server runs on the host machine, and the client translators access over the virtual network created by Qemu. You got most of the drivers for free! - -* Emacs File VFS - * I came to know from my Emacs loving friend that there are lots of VFS handlers in Emacs.. I was wondering if there can be translator which can tap into these Emacs VFS handlers. diff --git a/hurd/translator/wishlist_2.mdwn b/hurd/translator/wishlist_2.mdwn deleted file mode 100644 index 77f39644..00000000 --- a/hurd/translator/wishlist_2.mdwn +++ /dev/null @@ -1,201 +0,0 @@ -## <a name="Introduction"> Introduction </a> - -The idea behind file system translators is a powerful concept which hasn't recieved much attention in the mainstream computing world. So here is a list of interesting translators I've been able to dream up. I'm sure there are many more ideas floating around out there, so add them to the list! - -The [ferris project](http://witme.sourceforge.net/libferris.web/features.html) has some great ideas and code in the area of userspace dynamic filesystems, as has the [FUSE project](http://fuse.sourceforge.net/). - -## <a name="Audio_cdfs"> Audio\_cdfs </a> - -A translator which produces a directory of \*.wav files when you have an audio CD in the drive. - -## <a name="Ogg"> Ogg </a> - -This translator could be a sub-directory of the Audio\_cdfs translator and it would translate the \*.wav files into Ogg Vorbis/MP3 format. - -## <a name="CDDB"> </a> CDDB - -Of course it would be a lot nicer if the above two translators didn't name their files something worthless like track001.ogg. So we would want a translator which would hook up with a database on the web and produce meaningful file names. - -## <a name="Crypto"> Crypto </a> - -A cryptographic/steganographic seem like a nice match with the concept of user-land file systems. I like the idea of something like `settrans -a /secure stegfs --mpeg file001.mpg` - -## <a name="Revision_control"> Revision control </a> - -All of the empty space on your drive is now being wasted. Why not have a revision control translator which tracks changes to your documents? See also [this guy](http://www.linuxjournal.com/article.php?sid=5976). And then you'd do something like `cd /time-machine/2003/sept/14/` to see what your system looked like on the 14th of septempber 2003. - -## <a name="CVSFS"> </a> CVSFS - -See [cvsFS for Linux](http://cvsfs.sourceforge.net/). This provides a package which presents the CVS contents as mountable file system. It allows to view the versioned files as like they were ordinary files on a disk. 
-
-## <a name="tar_and_gzip"> tar and gzip </a>
-
-Rumor has it that they are on the way. Actually, a tar + gzip/bzip2 translator does exist (although it hasn't been used much...): see [the Hurdextras project](http://savannah.nongnu.org/projects/hurdextras/) on Savannah.
-
-## <a name="ROM"> </a> ROM
-
-How about a translator which makes it look like you can write to read-only media (like CDs), or change files which you don't have permission to change? This translator would make it seem like you could copy files to places where you normally couldn't. Think about combining this translator with the ftp translator and the tar and gzip translators (cd /ftp/gnu.org/gnome.tar.gz/writes\_allowed; make install). It could be that unionfs does this very thing.
-
-## <a name="Super_FIFO"> Super\_FIFO </a>
-
-It's like a named pipe which is smart enough to start a process every time something new tries to read from it. For example, let's say I have a script that reads in a JPEG image and spits out a smaller thumbnail \*.jpg to STDOUT. With a standard fifo (`mknod -p fifo`) this almost works (`script big.jpg > fifo`). But what if there are two processes trying to read the fifo at once? Ick. And of course the standard way only works once without rerunning the command. I'm not quite sure what the syntax should look like, but I'm sure someone out there has a great idea waiting to happen.
-
-## <a name="Perl"> Perl </a>
-
-Perl is a wonderful language for hacking together something useful in a short amount of time. No concept is complete without being able to use it in a Perl one-liner. And that goes for Hurd translators too. Right?
-
-    #!/usr/bin/perl
-    use Hurd::translator;
-
-    # the file named "two" can produce an endless supply of twos, etc. (a la /dev/zero)
-    my $i = 0;
-    for my $filename (qw(zero one two three four))
-    {
-        my $digit = $i;    # capture the current value for this file's closure
-        $libtrivfsread_codehash{$filename} =
-            sub { my $num_bytes = shift; return chr($digit) x $num_bytes; };
-        # that's a hash of references to closures
-        $i++;
-    }
-    translator_startup();
-
-A Perl translator has been started by [John Edwin Tobey](http://john-edwin-tobey.org/Hurd/) (pith).
-
-## <a name="Source_code"> Source code </a>
-
-Here's a crazy thought: how about a translator for source code? You have a C source file like `hello.c` which is your normal everyday file. But there's a translator sitting underneath, so when you `cd hello.c` you get a directory with files like `main()` which represent the subroutines in `hello.c`. And of course you should be able to edit/remove those and have it modify the original source.
-
-## <a name="Libraries"> Libraries </a>
-
-Here's an [idea](http://www.circlemud.org/~jelson/software/fusd/docs/node13.html) from the people making [userspace drivers in Linux](http://www.circlemud.org/~jelson/software/fusd/):
-
-* "One particularly interesting application of FUSD that we've found very useful is as a way to let regular user-space libraries export device file APIs. For example, imagine you had a library which factored large composite numbers. Typically, it might have a C interface--say, a function called `int *factorize(int bignum)`. With FUSD, it's possible to create a device file interface--say, a device called `/dev/factorize` to which clients can `write(2)` a big number, then `read(2)` back its factors.
-
-* This may sound strange, but device file APIs have at least three advantages over a typical library API. First, it becomes much more language independent--any language that can make [[system call]]s can access the factorization library. Second, the factorization code is running in a different address space; if it crashes, it won't crash or corrupt the caller. Third, and most interestingly, it is possible to use `select(2)` to wait for the factorization to complete. `select(2)` would make it easy for a client to factor a large number while remaining responsive to other events that might happen in the meantime. In other words, FUSD allows normal user-space libraries to integrate seamlessly with UNIX's existing, POSIX-standard event notification interface: `select(2)`."
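-The quoted idea is easy to picture from the client side. A minimal sketch,
-assuming a purely hypothetical /dev/factorize node (no such device exists; on
-the Hurd the natural equivalent would be a small translator, like the
-data-server example later in this document):
-
-    /* Client sketch for a hypothetical /dev/factorize device.  */
-    #include <fcntl.h>
-    #include <stdio.h>
-    #include <unistd.h>
-
-    int
-    main (void)
-    {
-      int fd = open ("/dev/factorize", O_RDWR);
-      if (fd < 0)
-        {
-          perror ("open");
-          return 1;
-        }
-
-      int bignum = 1001;                   /* 7 * 11 * 13 */
-      if (write (fd, &bignum, sizeof bignum) != sizeof bignum)
-        {
-          perror ("write");
-          return 1;
-        }
-
-      /* Blocks until the factorization is done; a client that wants to stay
-         responsive would select(2) on fd instead.  */
-      int factors[32];
-      ssize_t len = read (fd, factors, sizeof factors);
-      if (len < 0)
-        {
-          perror ("read");
-          return 1;
-        }
-      for (ssize_t i = 0; i < len / (ssize_t) sizeof factors[0]; i++)
-        printf ("%d\n", factors[i]);
-
-      close (fd);
-      return 0;
-    }
-
-Any language that can open, read and write a file gets the same access, which
-is exactly the point made in the quotation.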
-
-## <a name="Mail"> Mail </a>
-
-Am I off my rocker, or does an IMAP/POP translator sound like a good idea? It would make your remote mail servers look like local ones. Or what about a translator that makes an mbox-format mail spool look like a directory? Can anyone think of a good use for an SMTP translator?
-
-*Definitely: Copy my email in there to send it.* -- [[ArneBab|community/weblogs/ArneBab]]
-
-## <a name="UUEncode"> </a> UUEncode
-
-How about a UUEncode translator for those places where you can only store ASCII? Combine this with an NNTP translator and store your data in someone's Usenet archive. Or, since (as far as I know) there are no size limitations on file names in the Hurd, why not have a filesystem translator whose underlying store is a file name? (Now ls becomes cat.)
-
-## <a name="Computation"> Computation </a>
-
-This is from the revenge-of-the-command-line department. Make a directory translator whose contents are the result of the computation specified in the directory name. Here's an example...
-
-    $ settrans -a /comp /hurd/computationfs
-    $ cd "/comp/3+4"
-    $ ls -l
-    total 0
-    -rw-r--r--    1 nobody   users    0 Oct 16 11:41 7
-    $
-    $ cd "/comp/sqrt(2)"
-    $ ls -l
-    total 0
-    -rw-r--r--    1 nobody   users    0 Oct 16 11:42 1.4142135623731
-    $
-
-...etc. Now think about your favorite GUI HTML editor and using File->Open on the following directory name, ``"/comp/for i in $( find / -name *.html ); do ln -s $i `basename $i`;done"``, which would produce a directory listing with soft links to all of the \*.html files on your system. You could have all of the comforts of the shell from within that little File->Open dialog box.
-
-## <a name="Other"> Other </a>
-
-Just found Wolfgang Jährling's translator [wishlist](http://www.8ung.at/shell/trans.html).
-
-## <a name="Bochs"> Bochs </a>
-
-A translator which works with [Bochs](http://bochs.sourceforge.net/) disk images would be nice.
-
-## <a name="Rollover"> Rollover </a>
-
-A translator that uses a circular buffer to store log files. The translated node contains only the last N (kilo-, mega-)bytes. (A minimal sketch of such a circular buffer follows the Birthday section below.)
-
-## <a name="Birthday"> Birthday </a>
-
-A translator that provides an interface to the birthday program.
-
-You can cat your calendar, e.g. bd/calendar/today, bd/calendar/this-week or bd/calendar/this-month.
-
-And you could write new events into files located in bd/events/DATE/event-name.
-
-DATE is in the format that birthday expects: DD/MM/YYYY.
-
-The contents of the file are any or none of the following birthday options: ann (an anniversary), bd (a birthday), ev (some other event), wN (warn N days in advance of the date), toDATE (the event lasts until this DATE), forDAYS (the event runs for DAYS).
-
-You can optionally just edit the bd/birthdays file if you want to edit the configuration file by hand. It might make sense to write changes from bd/birthdays into ~/.birthdays.
-
-    $ settrans -c bd /hurd/birthday -f ~/.birthdays
-    $ ls bd/
-    birthdays  calendar  events
-    $ find bd -print
-    bd
-    bd/calendar
-    bd/calendar/daily
-    bd/calendar/this-week
-    bd/calendar/this-month
-    bd/events
-    bd/birthdays
-    $
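-A minimal sketch of the circular buffer behind the Rollover idea above (plain C,
-with made-up names; a real translator would call something like this from its
-libtrivfs read and write callbacks):
-
-    /* Keep only the newest CAPACITY bytes; older data is silently dropped.  */
-    #include <stddef.h>
-
-    #define CAPACITY 4096              /* "the last N bytes" */
-
-    struct rollbuf
-    {
-      char data[CAPACITY];
-      size_t start;                    /* index of the oldest byte */
-      size_t len;                      /* number of valid bytes, <= CAPACITY */
-    };
-
-    static void
-    rollbuf_write (struct rollbuf *b, const char *buf, size_t n)
-    {
-      for (size_t i = 0; i < n; i++)
-        {
-          b->data[(b->start + b->len) % CAPACITY] = buf[i];
-          if (b->len < CAPACITY)
-            b->len++;
-          else
-            b->start = (b->start + 1) % CAPACITY;   /* overwrite the oldest byte */
-        }
-    }
-
-    /* Copy out the current contents, oldest byte first; return the length.  */
-    static size_t
-    rollbuf_read (const struct rollbuf *b, char *out)
-    {
-      for (size_t i = 0; i < b->len; i++)
-        out[i] = b->data[(b->start + i) % CAPACITY];
-      return b->len;
-    }
-
-Reads of the translated node would then always return at most CAPACITY bytes,
-no matter how much has ever been written to it.
-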
-## <a name="LVM"> </a> LVM
-
-A translator to access block devices from Linux's [Logical Volume Management](http://www.tldp.org/HOWTO/LVM-HOWTO/) would be a useful addition.
-
-    # settrans -cap /dev/VolumeGroup0 /hurd/lvm /dev/PhysicalVolume0 /dev/PhysicalVolume1 ...
-    # ls /dev/VolumeGroup0/
-    home
-    var
-    # settrans -cap /home /hurd/ext2fs /dev/VolumeGroup0/home
-    # settrans -cap /var /hurd/ext2fs /dev/VolumeGroup0/var
-
-Probably both [LVM2](http://sourceware.org/lvm2/) and the [Device-mapper](http://sourceware.org/dm/) would need to be ported.
-
-## <a name="bridging_translator"> bridging translator </a>
-
-A [bridging](http://bridge.sourceforge.net/faq.html) translator could improve the Hurd's networking facilities.
-
-    # settrans -cap /dev/br0 /hurd/bridge -i eth0 -i eth1 ...
-    # settrans -cap /servers/socket/2 /hurd/pfinet -i /dev/br0 -a ... -g ... -m ...
-
-Perhaps Linux's bridging code and [utilities](http://bridge.sourceforge.net/) could be ported (or glued in), or code from one of the BSDs.
-
-## <a name="SSH_translator"> </a> SSH translator
-
-Presenting remote file systems over SSH, similar to what gnome-vfs does.
-
-## <a name="SMB_translator"> </a> SMB translator
-
-Presenting remote file systems through Samba, similar to what gnome-vfs does. Giuseppe Scrivano has worked on this, and smbfs is available at [hurdextras](http://savannah.nongnu.org/cgi-bin/viewcvs/hurdextras/smbfs/).
-
-## <a name="Crontab_translator"> Crontab translator </a>
-
-Presenting a user's crontab in a filesystem where cron entries are files.
-
-## <a name="globlink"> globlink </a>
-
-Firmlink to a file according to a filename-matching pattern. When a file goes away, the next file that matches is automatically linked to.
-
-    $ settrans -ac libfoo.so /hurd/globlink '/lib/libfoo*'
-
-## <a name="alphabetfs"> alphabetfs </a>
-
-Organize a large group of files by their first letter. Present one subdirectory for each letter of the alphabet.
-
-## <a name="fsysoptsctl"> fsysoptsctl </a>
-
-Send an fsysopts command to a set of translators. When you have a directory full of translators and you want to send each of them the same runtime option, this translator can do it for you.
-
-    $ settrans -ac all /hurd/fsysoptsctl '/tmp/mystuff/*'
-    $ fsysopts all --update
diff --git a/hurd/translator/writing/example.mdwn b/hurd/translator/writing/example.mdwn
deleted file mode 100644
index 0a3be4df..00000000
--- a/hurd/translator/writing/example.mdwn
+++ /dev/null
@@ -1,303 +0,0 @@
-[[!meta copyright="Copyright © 2007, 2008 Free Software Foundation, Inc."]]
-
-[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
-id="license" text="Permission is granted to copy, distribute and/or modify this
-document under the terms of the GNU Free Documentation License, Version 1.2 or
-any later version published by the Free Software Foundation; with no Invariant
-Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
-is included in the section entitled
-[[GNU Free Documentation License|/fdl]]."]]"""]]
-
-## Data User-Server Translator Example
-
-The code examples were written by Anand Babu.
- -We have a data.h header file, a data.defs file, a data-user.c, data-server.c -sources files and a Makefile. - -data.h: -------- - - #ifndef _data_user_ - #define _data_user_ - - /* Module data */ - - #include <mach/kern_return.h> - #include <mach/port.h> - #include <mach/message.h> - - #include <mach/std_types.h> - #include <mach/mach_types.h> - #include <device/device_types.h> - #include <device/net_status.h> - #include <sys/types.h> - #include <sys/stat.h> - #include <sys/statfs.h> - #include <sys/resource.h> - #include <sys/utsname.h> - #include <hurd/hurd_types.h> - - /* Routine data_set_value */ - #ifdef mig_external - mig_external - #else - extern - #endif - kern_return_t S_data_set_value - #if defined(LINTLIBRARY) - (data_port, value) - mach_port_t data_port; - int value; - { return S_data_set_value(data_port, value); } - #else - ( - mach_port_t data_port, - int value - ); - #endif - - /* Routine data_get_value */ - #ifdef mig_external - mig_external - #else - extern - #endif - kern_return_t S_data_get_value - #if defined(LINTLIBRARY) - (data_port, value) - mach_port_t data_port; - int *value; - { return S_data_get_value(data_port, value); } - #else - ( - mach_port_t data_port, - int *value - ); - #endif - - #endif /* not defined(_data_user_) */ - -data.defs: ----------- - - /* Definitions for data interface - - This file is part of the GNU Hurd. - - The GNU Hurd is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - The GNU Hurd is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with the GNU Hurd; see the file COPYING. If not, write to - the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ - - subsystem data 45000; - - #include <hurd/hurd_types.defs> - - #ifdef STACK_IMPORTS - STACK_IMPORTS - #endif - - /* intr-rpc.defs defines the INTR_INTERFACE macro to make the generated RPC - stubs send-interruptible, and to prefix them with `hurd_intr_rpc_'. 
*/ - INTR_INTERFACE - - /* set integer value to data */ - routine data_set_value ( - data_port: mach_port_t; - value: int); - - /* get integer value from data */ - routine data_get_value ( - data_port: mach_port_t; - out value: int); - -data-user.c: ------------- - - #include <stdio.h> - #include <hurd.h> - #include <hurd/hurd_types.h> - #include "data.h" - - #ifndef _GNU_SOURCE - #define _GNU_SOURCE - #endif - - int - main(int argc, char *argv[]) - { - int value=0; - mach_port_t data_server_port; - - data_server_port = file_name_lookup ("/tmp/trans", 0, 0); - printf ("data_server_port [%u]\n", data_server_port); - S_data_set_value (data_server_port, 99); - S_data_get_value (data_server_port, &value); - printf ("data->get_value: [%d]\n", value); - - return 0; - } - -data-server.c: --------------- - - #ifndef _GNU_SOURCE - #define _GNU_SOURCE - #endif - - #include <stdio.h> - #include <getopt.h> - #include <errno.h> - #include <sys/stat.h> - #include <error.h> - - #include <hurd/ports.h> - #include <hurd/hurd_types.h> - #include <hurd/trivfs.h> - - #include "data.h" - - extern boolean_t S_data_server - (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP); - - int trivfs_fstype = FSTYPE_MISC; - int trivfs_fsid = 0; - int trivfs_support_read = 0; - int trivfs_support_write = 0; - int trivfs_support_exec = 0; - int trivfs_allow_open = 0x00; - int trivfs_protid_nportclasses = 0; - int trivfs_cntl_nportclasses = 0; - - int data_value; - - int demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp) - { - return (S_data_server(inp,outp)||trivfs_demuxer(inp,outp)); - } - - void trivfs_modify_stat (struct trivfs_protid *cred, io_statbuf_t *st) - { - } - error_t trivfs_goaway (struct trivfs_control *fsys, int flags) - { - exit (0); - } - - kern_return_t S_data_set_value (mach_port_t data_port, int value) - { - data_value = value; - return 0; - } - - kern_return_t S_data_get_value (mach_port_t data_port, int *value) - { - *value = data_value; - return 0; - } - - int - main(int argc, char *argv[]) - { - int err; - mach_port_t bootstrap; - struct trivfs_control *fsys; - - if (argc > 1) - { - fprintf(stderr, "Usage: settrans [opts] node %s\n", program_invocation_name); - exit (1); - } - - task_get_bootstrap_port (mach_task_self (), &bootstrap); - if (bootstrap == MACH_PORT_NULL) - error(2, 0, "Must be started as a translator"); - - /* Reply to our parent */ - err = trivfs_startup (bootstrap, 0, 0, 0, 0, 0,&fsys); - mach_port_deallocate (mach_task_self (), bootstrap); - if (err) { - return (0); - } - - ports_manage_port_operations_one_thread (fsys->pi.bucket, demuxer, 0); - - return 0; - } - -Makefile: ---------- - - CC = gcc - MIG = mig - CFLAGS = -Wall -g -D_GNU_SOURCE - LDFLAGS = -lthreads -lports -ltrivfs -lfshelp -lshouldbeinlibc - INCLUDES = -I. - LCHDRS = - MIGCOMSFLAGS = -prefix S_ - OBJS = $(SRCS:.c=.o) - TAGS = etags.emacs21 - - all: data-server data-user - tags: - $(TAGS) $(SRCS) $(LCHDRS) - - stubs: data.defs - $(MIG) $(MIGCOMSFLAGS) -server dataServer.c -user dataUser.c $^ - data-server: data-server.c dataServer.c - $(CC) $^ $(CFLAGS) $(INCLUDES) $(LDFLAGS) -o $@ - data-user: data-user.c dataUser.c - $(CC) $^ $(CFLAGS) $(INCLUDES) -o $@ - clean: - rm -f *.o data-server data-user - - start: data-server data-user - settrans -ac /tmp/trans data-server - ps -x | grep data-server - end: - settrans -fg /tmp/trans - -Building --------- - -Do - - make stubs - -to create the dataUser.c and dataServer.c files generated by mig. 
Create the -executables using: - - make all - -Testing -------- - -Start the data-server translator using: - - settrans -ac /tmp/trans data-server - -You can check if it is running using - - ps -x | grep data-server - -Run the data-user executable to get the resultant output. - -You can remove the translator using: - - settrans -fg /tmp/trans - -To remove the built files use: - - make clean - -Happy Hacking! diff --git a/hurd/translator/xmlfs.mdwn b/hurd/translator/xmlfs.mdwn deleted file mode 100644 index 769c43ce..00000000 --- a/hurd/translator/xmlfs.mdwn +++ /dev/null @@ -1,11 +0,0 @@ -[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]] - -[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable -id="license" text="Permission is granted to copy, distribute and/or modify this -document under the terms of the GNU Free Documentation License, Version 1.2 or -any later version published by the Free Software Foundation; with no Invariant -Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license -is included in the section entitled -[[GNU Free Documentation License|/fdl]]."]]"""]] - -<http://www.nongnu.org/hurdextras/#xmlfs> |