path: root/devio/rdwr.c

/* Implements various types of I/O on top of raw devices.

   Copyright (C) 1995 Free Software Foundation, Inc.

   Written by Miles Bader <miles@gnu.ai.mit.edu>

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

#include <hurd.h>
#include <assert.h>
#include <string.h>

#include "open.h"
#include "dev.h"
#include "mem.h"
#include "window.h"

/* ---------------------------------------------------------------- */

/* Writes BUF to DEV, copying through an intermediate buffer to page-align
   it.  If STAGING_BUF isn't 0, it is used as the copy buffer for
   small-enough transfers (STAGING_BUF is assumed to be one block long).
   AMOUNT is the amount to write to the device, and LEN is the amount of
   source material actually in BUF; if LEN is smaller than AMOUNT, the
   remainder is written as zeros.  */
static error_t
copying_block_write(struct dev *dev, vm_address_t staging_buf,
		    vm_address_t buf, vm_size_t len, vm_size_t amount,
		    vm_offset_t *offs)
{
  error_t err = 0;
  vm_address_t copy_buf = staging_buf;
  vm_size_t copy_buf_len = dev->block_size;

  if (amount > dev->block_size || staging_buf == 0)
    {
      copy_buf_len = amount;
      err = vm_allocate(mach_task_self(), &copy_buf, copy_buf_len, 1);
      if (err)
	return err;
    }

  bcopy((char *)buf, (char *)copy_buf, len);
  if (len < amount && copy_buf == staging_buf)
    /* We need to zero the rest of the block, but only if we didn't
       vm_allocate it (in which case it will be zero-filled).  */
    bzero((char *)copy_buf + len, amount - len);

  err = dev_write(dev, copy_buf, amount, offs);

  if (copy_buf != staging_buf)
    vm_deallocate(mach_task_self(), copy_buf, copy_buf_len);

  return err;
}
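
/* A worked illustration of the padding behaviour above (hypothetical
   numbers, not part of the devio interface): with a 512-byte block size,
   writing 300 bytes of source data as one whole block looks roughly like

     vm_size_t len = 300;       amount of valid data in BUF
     vm_size_t amount = 512;    one whole block
     err = copying_block_write (dev, staging_buf, buf, len, amount, &offs);

   where DEV, STAGING_BUF, BUF, OFFS and ERR are caller-side variables.
   Bytes 300..511 reach the device as zeros, either because a freshly
   vm_allocate'd copy buffer is zero-filled or because the staging buffer is
   explicitly bzero'd.  */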

/* ---------------------------------------------------------------- */

/* Copies LEN bytes from BUF to DEV, using STAGING_BUF to do buffering of
   partial blocks, and returning the amount actually written in AMOUNT.
   *OFFS is incremented to reflect the amount read/written.  If an error
   occurs, the error code is returned, otherwise 0.  */
error_t
buffered_write(struct dev *dev, vm_address_t staging_buf,
	       vm_address_t buf, vm_size_t len, vm_size_t *amount,
	       vm_offset_t *offs)
{
  error_t err = 0;
  int bsize = dev->block_size;
  int staging_buf_loc = *offs % bsize;
  int left_in_staging_buf = bsize - staging_buf_loc;
  vm_offset_t start_offs = *offs;

  if (left_in_staging_buf > 0)
    /* Write what's buffered from the last I/O.  */
    {
      /* Amount of the current i/o we can put in the staging buffer.  */
      int stage = (left_in_staging_buf > len ? len : left_in_staging_buf);

      bcopy((char *)buf, (char *)staging_buf + staging_buf_loc, stage);
	  
      buf += stage;
      len -= stage;
      *offs += stage;

      if (stage == left_in_staging_buf)
	/* We've filled up STAGING_BUF so we can write it out now.  */
	{
	  /* Backup OFFS to reflect the beginning-of-block position.  */
	  *offs -= bsize;
	  err = dev_write(dev, staging_buf, bsize, offs);
	}
    }

  if (!err && len > bsize)
    /* Enough i/o pending to do whole block transfers.  */
    {
      /* The number of bytes at the end of the transfer that aren't a
	 multiple of the block-size.  We have to deal with these separately
	 because device i/o must be in block multiples.  */
      int excess = len % bsize;
      vm_size_t block_len = len - excess;

      if (dev_write_valid(dev, buf, block_len, offs))
	/* BUF is page-aligned, so we can do i/o directly to the device, or
	   it is small enough that it doesn't matter.  */
	err = dev_write(dev, buf, block_len, offs);
      else
	/* Argh!  BUF isn't page aligned!  We must filter the i/o through an
	   intermediate buffer...  */
	err = copying_block_write(dev, staging_buf,
				  buf, block_len, block_len, offs);

      if (*offs - start_offs < left_in_staging_buf + block_len)
	/* Didn't write out all the blocks, so suppress buffering the rest. */
	len = 0;
      else
	{
	  /* Only the trailing EXCESS bytes remain to be buffered.  */
	  buf += block_len;
	  len = excess;
	}
    }

  /* At this point, LEN should be < BLOCK_SIZE, so we use buffering again. */
  if (!err && len > 0)
    {
      bcopy((char *)buf, (char *)staging_buf, len);
      *offs += len;
    }

  *amount = *offs - start_offs;
  if (*amount > 0)
    /* If an error occurred, but we successfully wrote *something*, then
       pretend nothing bad happened; the error will probably get caught next
       time.  */
    err = 0;

  return err;
}
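
/* Rough usage sketch (hypothetical caller-side names DATA and DATA_LEN; not
   part of this file's interface): a byte-granular write to a serial device
   might look like

     vm_size_t amount;
     err = buffered_write (dev, io_state->buffer, (vm_address_t)data,
			   data_len, &amount, &io_state->location);

   Partial blocks accumulate in the staging buffer and are only flushed to
   the device once a whole block's worth of data has arrived.  */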

/* ---------------------------------------------------------------- */

/* Reads AMOUNT bytes from DEV and returns them in BUF and BUF_LEN (using the
   standard mach out-array conventions), using STAGING_BUF to do buffering of
   partial blocks.  *OFFS is incremented to reflect the amount read/written.
   If an error occurs, the error code is returned, otherwise 0.  */
error_t
buffered_read (struct dev *dev, vm_address_t staging_buf,
	       vm_address_t *buf, vm_size_t *buf_len, vm_size_t amount,
	       vm_offset_t *offs)
{
  error_t err = 0;
  int bsize = dev->block_size;
  vm_offset_t start_offs = *offs;
  int staging_buf_loc = *offs % bsize;
  int from_staging_buf = bsize - staging_buf_loc;
  vm_address_t block_buf = *buf;
  vm_size_t block_buf_size = *buf_len;
  vm_size_t block_amount = amount;

  if (staging_buf_loc > 0)
    {
      /* Read into a temporary buffer.  */
      block_buf = 0;
      block_buf_size = 0;

      if (from_staging_buf > amount)
	from_staging_buf = amount;

      block_amount -= from_staging_buf;
    }
  else
    from_staging_buf = 0;

  /* Read any new block required.  */
  if (block_amount > 0)
    {
      /* We read enough to get every full block of BLOCK_AMOUNT, plus an
	 additional whole block if there's any more; we just copy any excess
	 from that last block into STAGING_BUF for next time.  */
      block_amount = ((block_amount + bsize - 1) / bsize) * bsize;

      err = dev_read(dev, &block_buf, &block_buf_size, block_amount, offs);
      if (err && staging_buf_loc > 0)
	/* We got an error, but don't abort, since we did get the bit from
	   the buffer.  */
	{
	  err = 0;
	  amount = from_staging_buf;
	  block_amount = 0;
	}

      if (amount > *offs - start_offs)
	/* If we read less than we hoped, reflect this down below.  */
	amount = *offs - start_offs;
    }

  if (staging_buf_loc > 0)
    /* Coalesce what we have in STAGING_BUF with what we read.  */
    {
      err = allocate(buf, buf_len, amount);
      assert_perror(err);
      bcopy((char *)staging_buf + staging_buf_loc, (char *)*buf,
	    from_staging_buf);

      if (block_amount > 0)
	bcopy((char *)block_buf, (char *)*buf + from_staging_buf,
	      amount - from_staging_buf);
    }
  else
    /* Otherwise, BLOCK_BUF should already contain the correct data.  */
    {
      *buf = block_buf;
      *buf_len = block_buf_size;
    }

  if (*offs - start_offs > amount)
    /* We've read too far, so put some amount from the end back into
       STAGING_BUF.  */
    {
      int excess = (*offs - start_offs) - amount;

      bcopy((char *)block_buf + amount,
	    (char *)staging_buf + bsize - excess,
	    excess);
      *offs -= excess;

      if (excess >= vm_page_size)
	deallocate_excess(*buf, *buf_len, excess);
      *buf_len -= excess;
    }

  /* Deallocate any extra copy buffer if necessary.  */
  if (*buf != block_buf)
    vm_deallocate(mach_task_self(), block_buf, block_buf_size);

  return err;
}
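
/* Rough usage sketch (hypothetical caller-side names DATA and DATA_LEN): a
   buffered read at the current device position might look like

     vm_address_t data = 0;
     vm_size_t data_len = 0;
     err = buffered_read (dev, io_state->buffer, &data, &data_len,
			  amount, &io_state->location);

   On return DATA and DATA_LEN follow the mach out-array convention, so the
   caller owns the memory and should vm_deallocate it when finished; any
   trailing partial block read from the device is stashed back in the
   staging buffer for the next call.  */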

/* ---------------------------------------------------------------- */

/* Write BUF_LEN bytes from BUF to DEV, padding with zeros as necessary to
   write whole blocks, and returning the amount actually written in AMOUNT.
   If successful, 0 is returned, otherwise an error code is returned.  *OFFS
   is incremented by the change in device location.  */
error_t
raw_write(struct dev *dev,
	  vm_address_t buf, vm_size_t buf_len,
	  vm_size_t *amount, vm_offset_t *offs)
{
  error_t err;
  int bsize = dev->block_size;
  int block_amount = ((buf_len + bsize - 1) / bsize) * bsize;
  vm_offset_t start_offs = *offs;

  if (start_offs % bsize != 0)
    return EINVAL;

  if (block_amount == buf_len && dev_write_valid(dev, buf, block_amount, offs))
    /* BUF is page-aligned, so we can do i/o directly to the device, or
       it is small enough that it doesn't matter.  */
    err = dev_write(dev, buf, block_amount, offs);
  else
    /* Argh!  BUF isn't page aligned!  We must filter the i/o through an
       intermediate buffer...  [We use DEV's io_state buffer, as we know
       that the io_state is locked in open_rdwr, and it isn't otherwise
       used...]  */
    err = copying_block_write(dev, dev->io_state.buffer,
			      buf, buf_len, block_amount, offs);

  if (!err && *offs - start_offs < buf_len)
    *amount = *offs - start_offs;
  else
    *amount = buf_len;

  return err;
}
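
/* Worked example of the rounding above (hypothetical numbers): with a
   512-byte block size, BUF_LEN = 1000 gives

     block_amount = ((1000 + 511) / 512) * 512 = 1024

   so two whole blocks are written, the last 24 bytes of which are zero
   padding supplied by copying_block_write.  A BUF_LEN that is already a
   block multiple, from a buffer passing dev_write_valid, goes straight to
   dev_write.  */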

/* Read AMOUNT bytes from DEV into BUF and BUF_LEN; only whole blocks are
   read, but anything beyond AMOUNT bytes is discarded.  The standard
   mach out-array convention is used to return the data in BUF and BUF_LEN.
   If successful, 0 is returned, otherwise an error code is returned.  *OFFS
   is incremented by the change in device location.  */
error_t
raw_read(struct dev *dev,
	 vm_address_t *buf, vm_size_t *buf_len,
	 vm_size_t amount, vm_offset_t *offs)
{
  error_t err;
  int bsize = dev->block_size;
  int block_amount = ((amount + bsize - 1) / bsize) * bsize;

  if (*offs % bsize != 0)
    return EINVAL;

  err = dev_read(dev, buf, buf_len, block_amount, offs);
  if (!err)
    {
      int excess = *buf_len - amount;
      if (excess > vm_page_size)
	deallocate_excess(*buf, *buf_len, excess);
      if (excess > 0)
	*buf_len = amount;
    }

  return err;
}
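
/* Worked example (hypothetical numbers): a request for AMOUNT = 1000 bytes
   with a 512-byte block size reads

     block_amount = ((1000 + 511) / 512) * 512 = 1024

   bytes from the device; the trailing 24 bytes are discarded by trimming
   *BUF_LEN back to AMOUNT, with deallocate_excess returning any whole
   surplus pages.  */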

/* ---------------------------------------------------------------- */

struct rdwr_state
{
  struct dev *dev;
  off_t user_offs;
  vm_offset_t *offs_p;
  struct io_state *io_state;
};

/* Setup state needed for I/O to/from OPEN, putting it into STATE.  OFFS
   should be the original user-supplied offset.  */
static void
rdwr_state_init(struct rdwr_state *state, struct open *open, off_t offs)
{
  state->dev = open->dev;
  state->io_state = open_get_io_state(open);
  state->user_offs = offs;

  if (dev_is(state->dev, DEV_SERIAL))
    /* For serial i/o, we always ignore the proffered offs, and use the
       actual device offset.  */
    state->user_offs = -1;

  if (state->user_offs == -1 || !dev_is(state->dev, DEV_BUFFERED))
    /* If we're going to use some bit of IO_STATE, lock it first.  The only
       case where this isn't needed is windowed i/o with an explicit
       offset.  */
    io_state_lock(state->io_state);

  if (state->user_offs == -1)
    state->offs_p = &state->io_state->location;
  else
    state->offs_p = (vm_offset_t *)&state->user_offs;
}

/* Destroy any state created by rdwr_state_init.  */
static void
rdwr_state_finalize(struct rdwr_state *state)
{
  if (state->user_offs == -1 || !dev_is(state->dev, DEV_BUFFERED))
    io_state_unlock(state->io_state);
}
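
/* A sketch of what rdwr_state_init leaves behind (using only the fields
   defined above): for a buffered device and an explicit offset, OFFS_P
   simply aliases the caller's offset and IO_STATE stays unlocked:

     struct rdwr_state state;
     rdwr_state_init (&state, open, 4096);
       ... state.offs_p now points at state.user_offs ...
     rdwr_state_finalize (&state);

   For serial devices, or when the caller passes an offset of -1, OFFS_P
   instead points at the locked io_state's LOCATION field, so the device
   position advances as a side effect of the I/O.  */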

/* ---------------------------------------------------------------- */

/* Writes up to LEN bytes from BUF to OPEN's device at device offset OFFS
   (which may be ignored if the device doesn't support random access),
   and returns the number of bytes written in AMOUNT.  If no error occurs,
   zero is returned, otherwise the error code is returned.  */
error_t
open_write(struct open *open, vm_address_t buf, vm_size_t len,
	   vm_size_t *amount, off_t offs)
{
  error_t err;
  struct rdwr_state state;
  struct dev *dev = open->dev;

  rdwr_state_init(&state, open, offs);

  offs = *state.offs_p;
  if (offs < 0)
    err = EINVAL;
  else if (offs + len > dev->size)
    err = EIO;
  else if (!dev_is(dev, DEV_BUFFERED))
    err = raw_write(dev, buf, len, amount, state.offs_p);
  else if (dev_is(dev, DEV_SERIAL))
    {
      state.io_state->buffer_use = IO_STATE_BUFFERED_WRITE;
      err = buffered_write(dev, state.io_state->buffer, buf, len,
			   amount, state.offs_p);
    }
  else
    err = window_write(open->window, buf, len, amount, state.offs_p);

  rdwr_state_finalize(&state);

  return err;
}

/* Reads up to AMOUNT bytes from the device into BUF and BUF_LEN using the
   standard mach out-array convention.  If no error occurs, zero is returned,
   otherwise the error code is returned.  */
error_t
open_read(struct open *open, vm_address_t *buf, vm_size_t *buf_len,
	  vm_size_t amount, off_t offs)
{
  error_t err;
  struct rdwr_state state;
  struct dev *dev = open->dev;

  rdwr_state_init(&state, open, offs);

  offs = *state.offs_p;
  if (offs < 0)
    err = EINVAL;
  else if (offs + amount > dev->size)
    err = EIO;
  else if (!dev_is(dev, DEV_BUFFERED))
    err = raw_read(dev, buf, buf_len, amount, state.offs_p);
  else if (dev_is(dev, DEV_SERIAL))
    {
      state.io_state->buffer_use = IO_STATE_BUFFERED_READ;
      err = buffered_read(dev, state.io_state->buffer, buf, buf_len,
			  amount, state.offs_p);
    }
  else
    err = window_read(open->window, buf, buf_len, amount, state.offs_p);

  rdwr_state_finalize(&state);

  return err;
}

/* Set OPEN's location to OFFS, interpreted according to WHENCE as by seek.
   The new absolute location is returned in NEW_OFFS (and may not be the same
   as OFFS).  If no error occurs, zero is returned, otherwise the error code
   is returned.  */
error_t
open_seek (struct open *open, off_t offs, int whence, off_t *new_offs)
{
  error_t err = 0;
  struct io_state *io_state = open_get_io_state (open);

  if (!dev_is (open->dev, DEV_SEEKABLE))
    return ESPIPE;

  io_state_lock (io_state);

  switch (whence)
    {
    case SEEK_SET:
      *new_offs = offs; break;
    case SEEK_CUR:
      *new_offs = io_state->location + offs; break;
    case SEEK_END:
      *new_offs = open->dev->size + offs; break;
    default:
      err = EINVAL;
    }

  if (!err)
    {
      if (!dev_is (open->dev, DEV_BUFFERED))
	/* On unbuffered devices force seeks to the nearest block boundary.  */
	*new_offs -= *new_offs % open->dev->block_size;
      io_state->location = *new_offs;
    }

  io_state_unlock (io_state);

  return err;
}
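
/* Worked example of the seek arithmetic (hypothetical numbers): on an
   unbuffered device with a 512-byte block size and a size of 10240 bytes,

     open_seek (open, 1000, SEEK_SET, &new_offs)

   yields NEW_OFFS = 1000 - (1000 % 512) = 512, the seek being forced back
   to the nearest block boundary, while SEEK_END with an offset of -512
   positions at the start of the last block (10240 - 512 = 9728).  */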