Line data Source code
1 : /* secmem.c - memory allocation from a secure heap
2 : * Copyright (C) 1998, 1999, 2000, 2001, 2002,
3 : * 2003, 2007 Free Software Foundation, Inc.
4 : * Copyright (C) 2013, 2016 g10 Code GmbH
5 : *
6 : * This file is part of Libgcrypt.
7 : *
8 : * Libgcrypt is free software; you can redistribute it and/or modify
9 : * it under the terms of the GNU Lesser general Public License as
10 : * published by the Free Software Foundation; either version 2.1 of
11 : * the License, or (at your option) any later version.
12 : *
13 : * Libgcrypt is distributed in the hope that it will be useful,
14 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 : * GNU Lesser General Public License for more details.
17 : *
18 : * You should have received a copy of the GNU Lesser General Public
19 : * License along with this program; if not, see <http://www.gnu.org/licenses/>.
20 : */
21 :
22 : #include <config.h>
23 : #include <stdio.h>
24 : #include <stdlib.h>
25 : #include <string.h>
26 : #include <errno.h>
27 : #include <stdarg.h>
28 : #include <unistd.h>
29 : #include <stddef.h>
30 :
31 : #if defined(HAVE_MLOCK) || defined(HAVE_MMAP)
32 : #include <sys/mman.h>
33 : #include <sys/types.h>
34 : #include <fcntl.h>
35 : #ifdef USE_CAPABILITIES
36 : #include <sys/capability.h>
37 : #endif
38 : #endif
39 :
40 : #include "g10lib.h"
41 : #include "secmem.h"
42 :
43 : #if defined (MAP_ANON) && ! defined (MAP_ANONYMOUS)
44 : #define MAP_ANONYMOUS MAP_ANON
45 : #endif
46 :
47 : #define MINIMUM_POOL_SIZE 16384
48 : #define STANDARD_POOL_SIZE 32768
49 : #define DEFAULT_PAGE_SIZE 4096
50 :
/* Header prepended to every allocation inside a pool.  The caller's
   data starts at the ALIGNED member; BLOCK_HEAD_SIZE (defined below)
   is the offset of that member.  */
typedef struct memblock
{
  unsigned size;		/* Size of the memory available to the
				   user.  */
  int flags;			/* See below.  */
  PROPERLY_ALIGNED_TYPE aligned; /* First byte handed out to the user.  */
} memblock_t;

/* This flag specifies that the memory block is in use. */
#define MB_FLAG_ACTIVE (1 << 0)
61 :
/* An object describing a memory pool. */
typedef struct pooldesc_s
{
  /* A link to the next pool.  This is used to connect the overflow
   * pools. */
  struct pooldesc_s *next;

  /* A memory buffer used as allocation pool. */
  void *mem;

  /* The allocated size of MEM. */
  size_t size;

  /* Flag indicating that this memory pool is ready for use.  May be
   * checked in an atexit function. */
  volatile int okay;

  /* Flag indicating whether MEM is mmapped; decides whether release
   * happens via munmap or free (see _gcry_secmem_term). */
  volatile int is_mmapped;

  /* The number of allocated bytes and the number of used blocks in
   * this pool. */
  unsigned int cur_alloced, cur_blocks;
} pooldesc_t;


/* The pool of secure memory.  This is the head of a linked list with
 * the first element being the standard mlock-ed pool and the
 * following elements being the overflow pools. */
static pooldesc_t mainpool;
92 :
93 :
/* A couple of flags with some being set early. */
static int disable_secmem;   /* Secure memory handling disabled (set when
                                initialized with a size of 0).  */
static int show_warning;     /* A warning is pending (e.g. mlock failed).  */
static int not_locked;       /* The pool could not be locked into core.  */
static int no_warning;       /* Suppress the insecure-memory warning.  */
static int suspend_warning;  /* Temporarily hold back the warning.  */
static int no_mlock;         /* Do not call mlock at all.  */
static int no_priv_drop;     /* Do not drop setuid/capability privileges.  */

/* Lock protecting accesses to the memory pools. */
GPGRT_LOCK_DEFINE (secmem_lock);

/* Convenient macros. */
#define SECMEM_LOCK   gpgrt_lock_lock   (&secmem_lock)
#define SECMEM_UNLOCK gpgrt_lock_unlock (&secmem_lock)

/* The size of the memblock structure; this does not include the
   memory that is available to the user. */
#define BLOCK_HEAD_SIZE \
  offsetof (memblock_t, aligned)

/* Convert an address into the according memory block structure. */
#define ADDR_TO_BLOCK(addr) \
  (memblock_t *) (void *) ((char *) addr - BLOCK_HEAD_SIZE)
118 :
119 : /* Check whether P points into POOL. */
120 : static inline int
121 0 : ptr_into_pool_p (pooldesc_t *pool, const void *p)
122 : {
123 : /* We need to convert pointers to addresses. This is required by
124 : C-99 6.5.8 to avoid undefined behaviour. See also
125 : http://lists.gnupg.org/pipermail/gcrypt-devel/2007-February/001102.html
126 : */
127 0 : uintptr_t p_addr = (uintptr_t)p;
128 0 : uintptr_t pool_addr = (uintptr_t)pool->mem;
129 :
130 0 : return p_addr >= pool_addr && p_addr < pool_addr + pool->size;
131 : }
132 :
133 : /* Update the stats. */
134 : static void
135 0 : stats_update (pooldesc_t *pool, size_t add, size_t sub)
136 : {
137 0 : if (add)
138 : {
139 0 : pool->cur_alloced += add;
140 0 : pool->cur_blocks++;
141 : }
142 0 : if (sub)
143 : {
144 0 : pool->cur_alloced -= sub;
145 0 : pool->cur_blocks--;
146 : }
147 0 : }
148 :
149 : /* Return the block following MB or NULL, if MB is the last block. */
150 : static memblock_t *
151 0 : mb_get_next (pooldesc_t *pool, memblock_t *mb)
152 : {
153 : memblock_t *mb_next;
154 :
155 0 : mb_next = (memblock_t *) (void *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size);
156 :
157 0 : if (! ptr_into_pool_p (pool, mb_next))
158 0 : mb_next = NULL;
159 :
160 0 : return mb_next;
161 : }
162 :
163 : /* Return the block preceding MB or NULL, if MB is the first
164 : block. */
165 : static memblock_t *
166 0 : mb_get_prev (pooldesc_t *pool, memblock_t *mb)
167 : {
168 : memblock_t *mb_prev, *mb_next;
169 :
170 0 : if (mb == pool->mem)
171 0 : mb_prev = NULL;
172 : else
173 : {
174 0 : mb_prev = (memblock_t *) pool->mem;
175 : while (1)
176 : {
177 0 : mb_next = mb_get_next (pool, mb_prev);
178 0 : if (mb_next == mb)
179 0 : break;
180 : else
181 0 : mb_prev = mb_next;
182 : }
183 : }
184 :
185 0 : return mb_prev;
186 : }
187 :
/* If the preceding block of MB and/or the following block of MB
   exist and are not active, merge them to form a bigger block. */
static void
mb_merge (pooldesc_t *pool, memblock_t *mb)
{
  memblock_t *mb_prev, *mb_next;

  mb_prev = mb_get_prev (pool, mb);
  mb_next = mb_get_next (pool, mb);

  /* Absorb MB into a free predecessor first; MB then refers to the
     merged block so that a free successor is absorbed into it too.  */
  if (mb_prev && (! (mb_prev->flags & MB_FLAG_ACTIVE)))
    {
      mb_prev->size += BLOCK_HEAD_SIZE + mb->size;
      mb = mb_prev;
    }
  if (mb_next && (! (mb_next->flags & MB_FLAG_ACTIVE)))
    mb->size += BLOCK_HEAD_SIZE + mb_next->size;
}
206 :
/* Return a new block, which can hold SIZE bytes.  Performs a
   first-fit scan starting at BLOCK; the found block is marked active
   and split when the remainder is big enough to carry its own
   header.  Returns NULL with errno set to ENOMEM when the pool is
   exhausted.  */
static memblock_t *
mb_get_new (pooldesc_t *pool, memblock_t *block, size_t size)
{
  memblock_t *mb, *mb_split;

  for (mb = block; ptr_into_pool_p (pool, mb); mb = mb_get_next (pool, mb))
    if (! (mb->flags & MB_FLAG_ACTIVE) && mb->size >= size)
      {
	/* Found a free block. */
	mb->flags |= MB_FLAG_ACTIVE;

	if (mb->size - size > BLOCK_HEAD_SIZE)
	  {
	    /* Split block: the tail gets its own header and is handed
	       back to the free list (merging with a following free
	       block if possible).  */

	    mb_split = (memblock_t *) (void *) (((char *) mb) + BLOCK_HEAD_SIZE
						+ size);
	    mb_split->size = mb->size - size - BLOCK_HEAD_SIZE;
	    mb_split->flags = 0;

	    mb->size = size;

	    mb_merge (pool, mb_split);

	  }

	break;
      }

  /* The scan ran off the end of the pool without finding a fit.  */
  if (! ptr_into_pool_p (pool, mb))
    {
      gpg_err_set_errno (ENOMEM);
      mb = NULL;
    }

  return mb;
}
245 :
246 : /* Print a warning message. */
247 : static void
248 0 : print_warn (void)
249 : {
250 0 : if (!no_warning)
251 0 : log_info (_("Warning: using insecure memory!\n"));
252 0 : }
253 :
254 :
/* Lock the memory pages of pool P of size N into core and drop
 * privileges.  A failure to lock is not fatal: it is recorded in
 * SHOW_WARNING and NOT_LOCKED and only unexpected error codes are
 * logged.  */
static void
lock_pool_pages (void *p, size_t n)
{
#if defined(USE_CAPABILITIES) && defined(HAVE_MLOCK)
  int err;

  {
    cap_t cap;

    if (!no_priv_drop)
      {
        /* Temporarily raise CAP_IPC_LOCK to the effective set so that
           mlock may succeed.  */
        cap = cap_from_text ("cap_ipc_lock+ep");
        cap_set_proc (cap);
        cap_free (cap);
      }
    err = no_mlock? 0 : mlock (p, n);
    if (err && errno)
      err = errno;
    if (!no_priv_drop)
      {
        /* Drop the effective capability again; keep it permitted only.  */
        cap = cap_from_text ("cap_ipc_lock+p");
        cap_set_proc (cap);
        cap_free(cap);
      }
  }

  if (err)
    {
      /* EPERM and friends are expected on unprivileged systems; only
         log genuinely unexpected errors.  */
      if (err != EPERM
#ifdef EAGAIN /* BSD and also Linux may return EAGAIN */
	  && err != EAGAIN
#endif
#ifdef ENOSYS /* Some SCOs return this (function not implemented) */
	  && err != ENOSYS
#endif
#ifdef ENOMEM /* Linux might return this. */
          && err != ENOMEM
#endif
	  )
	log_error ("can't lock memory: %s\n", strerror (err));
      show_warning = 1;
      not_locked = 1;
    }

#elif defined(HAVE_MLOCK)
  uid_t uid;
  int err;

  uid = getuid ();

#ifdef HAVE_BROKEN_MLOCK
  /* Under HP/UX mlock segfaults if called by non-root.  Note, we have
     not checked whether mlock does really work under AIX where we
     also detected a broken mlock.  Note further, that using plock ()
     is not a good idea under AIX. */
  if (uid)
    {
      errno = EPERM;
      err = errno;
    }
  else
    {
      err = no_mlock? 0 : mlock (p, n);
      if (err && errno)
	err = errno;
    }
#else /* !HAVE_BROKEN_MLOCK */
  err = no_mlock? 0 : mlock (p, n);
  if (err && errno)
    err = errno;
#endif /* !HAVE_BROKEN_MLOCK */

  /* Test whether we are running setuid(0). */
  if (uid && ! geteuid ())
    {
      /* Yes, we are. */
      if (!no_priv_drop)
	{
	  /* Check that we really dropped the privs.
	   * Note: setuid(0) should always fail */
	  if (setuid (uid) || getuid () != geteuid () || !setuid (0))
	    log_fatal ("failed to reset uid: %s\n", strerror (errno));
	}
    }

  if (err)
    {
      /* Expected error codes merely record that locking failed.  */
      if (err != EPERM
#ifdef EAGAIN /* BSD and also Linux may return this. */
	  && err != EAGAIN
#endif
#ifdef ENOSYS /* Some SCOs return this (function not implemented). */
	  && err != ENOSYS
#endif
#ifdef ENOMEM /* Linux might return this. */
          && err != ENOMEM
#endif
	  )
	log_error ("can't lock memory: %s\n", strerror (err));
      show_warning = 1;
      not_locked = 1;
    }

#elif defined ( __QNX__ )
  /* QNX does not page at all, so the whole secure memory stuff does
   * not make much sense.  However it is still of use because it
   * wipes out the memory on a free().
   * Therefore it is sufficient to suppress the warning. */
  (void)p;
  (void)n;
#elif defined (HAVE_DOSISH_SYSTEM) || defined (__CYGWIN__)
  /* It does not make sense to print such a warning, given the fact that
   * this whole Windows !@#$% and their user base are inherently insecure. */
  (void)p;
  (void)n;
#elif defined (__riscos__)
  /* No virtual memory on RISC OS, so no pages are swapped to disc,
   * besides we don't have mmap, so we don't use it! ;-)
   * But don't complain, as explained above. */
  (void)p;
  (void)n;
#else
  (void)p;
  (void)n;
  if (!no_mlock)
    log_info ("Please note that you don't have secure memory on this system\n");
#endif
}
385 :
/* Initialize POOL to hold N bytes.  An anonymous (or /dev/zero
   backed) private mmap is tried first so the pool is page-aligned;
   on failure a plain malloc is used.  Aborts via log_fatal when no
   memory can be obtained at all.  */
static void
init_pool (pooldesc_t *pool, size_t n)
{
  memblock_t *mb;

  pool->size = n;

  if (disable_secmem)
    log_bug ("secure memory is disabled");


#if HAVE_MMAP
  {
    size_t pgsize;
    long int pgsize_val;

# if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
    pgsize_val = sysconf (_SC_PAGESIZE);
# elif defined(HAVE_GETPAGESIZE)
    pgsize_val = getpagesize ();
# else
    pgsize_val = -1;
# endif
    /* Fall back to a default when the page size cannot be queried. */
    pgsize = (pgsize_val != -1 && pgsize_val > 0)? pgsize_val:DEFAULT_PAGE_SIZE;

    /* Round the pool size up to a multiple of the page size; assumes
       the page size is a power of two.  */
    pool->size = (pool->size + pgsize - 1) & ~(pgsize - 1);
# ifdef MAP_ANONYMOUS
    pool->mem = mmap (0, pool->size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
# else /* map /dev/zero instead */
    {
      int fd;

      fd = open ("/dev/zero", O_RDWR);
      if (fd == -1)
        {
          log_error ("can't open /dev/zero: %s\n", strerror (errno));
          pool->mem = (void *) -1;
        }
      else
        {
          pool->mem = mmap (0, pool->size,
                            (PROT_READ | PROT_WRITE), MAP_PRIVATE, fd, 0);
          close (fd);
        }
    }
# endif
    if (pool->mem == (void *) -1)
      log_info ("can't mmap pool of %u bytes: %s - using malloc\n",
                (unsigned) pool->size, strerror (errno));
    else
      {
        pool->is_mmapped = 1;
        pool->okay = 1;
      }
  }
#endif /*HAVE_MMAP*/

  /* Fallback when mmap is unavailable or failed. */
  if (!pool->okay)
    {
      pool->mem = malloc (pool->size);
      if (!pool->mem)
        log_fatal ("can't allocate memory pool of %u bytes\n",
                   (unsigned) pool->size);
      else
        pool->okay = 1;
    }

  /* Initialize first memory block covering the whole pool. */
  mb = (memblock_t *) pool->mem;
  mb->size = pool->size;
  mb->flags = 0;
}
460 :
461 : void
462 0 : _gcry_secmem_set_flags (unsigned flags)
463 : {
464 : int was_susp;
465 :
466 0 : SECMEM_LOCK;
467 :
468 0 : was_susp = suspend_warning;
469 0 : no_warning = flags & GCRY_SECMEM_FLAG_NO_WARNING;
470 0 : suspend_warning = flags & GCRY_SECMEM_FLAG_SUSPEND_WARNING;
471 0 : no_mlock = flags & GCRY_SECMEM_FLAG_NO_MLOCK;
472 0 : no_priv_drop = flags & GCRY_SECMEM_FLAG_NO_PRIV_DROP;
473 :
474 : /* and now issue the warning if it is not longer suspended */
475 0 : if (was_susp && !suspend_warning && show_warning)
476 : {
477 0 : show_warning = 0;
478 0 : print_warn ();
479 : }
480 :
481 0 : SECMEM_UNLOCK;
482 0 : }
483 :
484 : unsigned int
485 0 : _gcry_secmem_get_flags (void)
486 : {
487 : unsigned flags;
488 :
489 0 : SECMEM_LOCK;
490 :
491 0 : flags = no_warning ? GCRY_SECMEM_FLAG_NO_WARNING : 0;
492 0 : flags |= suspend_warning ? GCRY_SECMEM_FLAG_SUSPEND_WARNING : 0;
493 0 : flags |= not_locked ? GCRY_SECMEM_FLAG_NOT_LOCKED : 0;
494 0 : flags |= no_mlock ? GCRY_SECMEM_FLAG_NO_MLOCK : 0;
495 0 : flags |= no_priv_drop ? GCRY_SECMEM_FLAG_NO_PRIV_DROP : 0;
496 :
497 0 : SECMEM_UNLOCK;
498 :
499 0 : return flags;
500 : }
501 :
502 :
/* This function initializes the main memory pool MAINPOOL.  It is
 * expected to be called with the secmem lock held.  A size N of 0
 * disables secure memory and instead drops capabilities or setuid
 * privileges; otherwise the pool is created and locked into core.  */
static void
_gcry_secmem_init_internal (size_t n)
{
  pooldesc_t *pool;

  pool = &mainpool;
  if (!n)
    {
#ifdef USE_CAPABILITIES
      /* drop all capabilities */
      if (!no_priv_drop)
        {
          cap_t cap;

          cap = cap_from_text ("all-eip");
          cap_set_proc (cap);
          cap_free (cap);
        }

#elif !defined(HAVE_DOSISH_SYSTEM)
      uid_t uid;

      disable_secmem = 1;
      uid = getuid ();
      if (uid != geteuid ())
        {
          /* Permanently drop setuid privileges and verify the drop
             really worked: setuid(0) must fail afterwards.  */
          if (setuid (uid) || getuid () != geteuid () || !setuid (0))
            log_fatal ("failed to drop setuid\n");
        }
#endif
    }
  else
    {
      /* Enforce a sane lower bound on the pool size. */
      if (n < MINIMUM_POOL_SIZE)
        n = MINIMUM_POOL_SIZE;
      if (! pool->okay)
        {
          init_pool (pool, n);
          lock_pool_pages (pool->mem, n);
        }
      else
        log_error ("Oops, secure memory pool already initialized\n");
    }
}
549 :
550 :
551 :
552 : /* Initialize the secure memory system. If running with the necessary
553 : privileges, the secure memory pool will be locked into the core in
554 : order to prevent page-outs of the data. Furthermore allocated
555 : secure memory will be wiped out when released. */
556 : void
557 0 : _gcry_secmem_init (size_t n)
558 : {
559 0 : SECMEM_LOCK;
560 :
561 0 : _gcry_secmem_init_internal (n);
562 :
563 0 : SECMEM_UNLOCK;
564 0 : }
565 :
566 :
567 : gcry_err_code_t
568 0 : _gcry_secmem_module_init ()
569 : {
570 : /* Not anymore needed. */
571 0 : return 0;
572 : }
573 :
574 :
575 : static void *
576 0 : _gcry_secmem_malloc_internal (size_t size, int xhint)
577 : {
578 : pooldesc_t *pool;
579 : memblock_t *mb;
580 :
581 0 : pool = &mainpool;
582 :
583 0 : if (!pool->okay)
584 : {
585 : /* Try to initialize the pool if the user forgot about it. */
586 0 : _gcry_secmem_init_internal (STANDARD_POOL_SIZE);
587 0 : if (!pool->okay)
588 : {
589 0 : log_info (_("operation is not possible without "
590 : "initialized secure memory\n"));
591 0 : gpg_err_set_errno (ENOMEM);
592 0 : return NULL;
593 : }
594 : }
595 0 : if (not_locked && fips_mode ())
596 : {
597 0 : log_info (_("secure memory pool is not locked while in FIPS mode\n"));
598 0 : gpg_err_set_errno (ENOMEM);
599 0 : return NULL;
600 : }
601 0 : if (show_warning && !suspend_warning)
602 : {
603 0 : show_warning = 0;
604 0 : print_warn ();
605 : }
606 :
607 : /* Blocks are always a multiple of 32. */
608 0 : size = ((size + 31) / 32) * 32;
609 :
610 0 : mb = mb_get_new (pool, (memblock_t *) pool->mem, size);
611 0 : if (mb)
612 : {
613 0 : stats_update (pool, size, 0);
614 0 : return &mb->aligned.c;
615 : }
616 :
617 : /* If we are called from xmalloc style function resort to the
618 : * overflow pools to return memory. We don't do this in FIPS mode,
619 : * though. */
620 0 : if (xhint && !fips_mode ())
621 : {
622 0 : for (pool = pool->next; pool; pool = pool->next)
623 : {
624 0 : mb = mb_get_new (pool, (memblock_t *) pool->mem, size);
625 0 : if (mb)
626 : {
627 0 : stats_update (pool, size, 0);
628 0 : return &mb->aligned.c;
629 : }
630 : }
631 : /* Allocate a new overflow pool. We put a new pool right after
632 : * the mainpool so that the next allocation will happen in that
633 : * pool and not in one of the older pools. When this new pool
634 : * gets full we will try to find space in the older pools. */
635 0 : pool = calloc (1, sizeof *pool);
636 0 : if (!pool)
637 0 : return NULL; /* Not enough memory for a new pool descriptor. */
638 0 : pool->size = STANDARD_POOL_SIZE;
639 0 : pool->mem = malloc (pool->size);
640 0 : if (!pool->mem)
641 0 : return NULL; /* Not enough memory available for a new pool. */
642 : /* Initialize first memory block. */
643 0 : mb = (memblock_t *) pool->mem;
644 0 : mb->size = pool->size;
645 0 : mb->flags = 0;
646 :
647 0 : pool->okay = 1;
648 :
649 : /* Take care: in _gcry_private_is_secure we do not lock and thus
650 : * we assume that the second assignment below is atomic. */
651 0 : pool->next = mainpool.next;
652 0 : mainpool.next = pool;
653 :
654 : /* After the first time we allocated an overflow pool, print a
655 : * warning. */
656 0 : if (!pool->next)
657 0 : print_warn ();
658 :
659 : /* Allocate. */
660 0 : mb = mb_get_new (pool, (memblock_t *) pool->mem, size);
661 0 : if (mb)
662 : {
663 0 : stats_update (pool, size, 0);
664 0 : return &mb->aligned.c;
665 : }
666 : }
667 :
668 0 : return NULL;
669 : }
670 :
671 :
672 : /* Allocate a block from the secmem of SIZE. With XHINT set assume
673 : * that the caller is a xmalloc style function. */
674 : void *
675 0 : _gcry_secmem_malloc (size_t size, int xhint)
676 : {
677 : void *p;
678 :
679 0 : SECMEM_LOCK;
680 0 : p = _gcry_secmem_malloc_internal (size, xhint);
681 0 : SECMEM_UNLOCK;
682 :
683 0 : return p;
684 : }
685 :
/* Wipe and release the block at A.  Returns 1 when A was found in
   one of our pools and freed, 0 when A does not belong to us.  Must
   be called with the secmem lock held.  */
static int
_gcry_secmem_free_internal (void *a)
{
  pooldesc_t *pool;
  memblock_t *mb;
  int size;

  /* Locate the pool containing A. */
  for (pool = &mainpool; pool; pool = pool->next)
    if (pool->okay && ptr_into_pool_p (pool, a))
      break;
  if (!pool)
    return 0; /* A does not belong to us. */

  mb = ADDR_TO_BLOCK (a);
  size = mb->size;

  /* Overwrite the user data with several patterns.  This does not
   * make much sense: probably this memory is held in the cache.  We
   * do it anyway: */
#define MB_WIPE_OUT(byte) \
  wipememory2 (((char *) mb + BLOCK_HEAD_SIZE), (byte), size);

  MB_WIPE_OUT (0xff);
  MB_WIPE_OUT (0xaa);
  MB_WIPE_OUT (0x55);
  MB_WIPE_OUT (0x00);

  /* Update stats. */
  stats_update (pool, 0, size);

  mb->flags &= ~MB_FLAG_ACTIVE;

  /* Return the block to the free list, merging with free neighbours. */
  mb_merge (pool, mb);

  return 1; /* Freed. */
}
721 :
722 :
723 : /* Wipe out and release memory. Returns true if this function
724 : * actually released A. */
725 : int
726 0 : _gcry_secmem_free (void *a)
727 : {
728 : int mine;
729 :
730 0 : if (!a)
731 0 : return 1; /* Tell caller that we handled it. */
732 :
733 0 : SECMEM_LOCK;
734 0 : mine = _gcry_secmem_free_internal (a);
735 0 : SECMEM_UNLOCK;
736 0 : return mine;
737 : }
738 :
739 :
/* Reallocate P to NEWSIZE bytes of secure memory.  Shrinking is a
 * no-op (the original block is returned unchanged); growing
 * allocates a new block, copies the old data, zero-fills the tail
 * and frees the old block.  Must be called with the secmem lock
 * held.  */
static void *
_gcry_secmem_realloc_internal (void *p, size_t newsize, int xhint)
{
  memblock_t *mb;
  size_t size;
  void *a;

  /* Recover the block header from the user pointer (hand-rolled
     offsetof of the user area).  */
  mb = (memblock_t *) (void *) ((char *) p
				- ((size_t) &((memblock_t *) 0)->aligned.c));
  size = mb->size;
  if (newsize < size)
    {
      /* It is easier to not shrink the memory. */
      a = p;
    }
  else
    {
      a = _gcry_secmem_malloc_internal (newsize, xhint);
      if (a)
	{
	  memcpy (a, p, size);
	  memset ((char *) a + size, 0, newsize - size);
	  _gcry_secmem_free_internal (p);
	}
    }

  return a;
}
768 :
769 :
770 : /* Realloc memory. With XHINT set assume that the caller is a xmalloc
771 : * style function. */
772 : void *
773 0 : _gcry_secmem_realloc (void *p, size_t newsize, int xhint)
774 : {
775 : void *a;
776 :
777 0 : SECMEM_LOCK;
778 0 : a = _gcry_secmem_realloc_internal (p, newsize, xhint);
779 0 : SECMEM_UNLOCK;
780 :
781 0 : return a;
782 : }
783 :
784 :
785 : /* Return true if P points into the secure memory areas. */
786 : int
787 0 : _gcry_private_is_secure (const void *p)
788 : {
789 : pooldesc_t *pool;
790 :
791 : /* We do no lock here because once a pool is allocatred it will not
792 : * be removed anymore (except for gcry_secmem_term). Further,
793 : * adding a new pool to the list should be atomic. */
794 0 : for (pool = &mainpool; pool; pool = pool->next)
795 0 : if (pool->okay && ptr_into_pool_p (pool, p))
796 0 : return 1;
797 :
798 0 : return 0;
799 : }
800 :
801 :
802 : /****************
803 : * Warning: This code might be called by an interrupt handler
804 : * and frankly, there should really be such a handler,
805 : * to make sure that the memory is wiped out.
806 : * We hope that the OS wipes out mlocked memory after
807 : * receiving a SIGKILL - it really should do so, otherwise
808 : * there is no chance to get the secure memory cleaned.
809 : */
810 : void
811 0 : _gcry_secmem_term ()
812 : {
813 : pooldesc_t *pool, *next;
814 :
815 0 : for (pool = &mainpool; pool; pool = next)
816 : {
817 0 : next = pool->next;
818 0 : if (!pool->okay)
819 0 : continue;
820 :
821 0 : wipememory2 (pool->mem, 0xff, pool->size);
822 0 : wipememory2 (pool->mem, 0xaa, pool->size);
823 0 : wipememory2 (pool->mem, 0x55, pool->size);
824 0 : wipememory2 (pool->mem, 0x00, pool->size);
825 : if (0)
826 : ;
827 : #if HAVE_MMAP
828 0 : else if (pool->is_mmapped)
829 0 : munmap (pool->mem, pool->size);
830 : #endif
831 : else
832 0 : free (pool->mem);
833 0 : pool->mem = NULL;
834 0 : pool->okay = 0;
835 0 : pool->size = 0;
836 0 : if (pool != &mainpool)
837 0 : free (pool);
838 : }
839 0 : mainpool.next = NULL;
840 0 : not_locked = 0;
841 0 : }
842 :
843 :
/* Print stats of the secmem allocator.  With EXTENDED passed as true
 * a detailed listing is returned (used for testing). */
void
_gcry_secmem_dump_stats (int extended)
{
  pooldesc_t *pool;
  memblock_t *mb;
  int i, poolno;

  SECMEM_LOCK;

  for (pool = &mainpool, poolno = 0; pool; pool = pool->next, poolno++)
    {
      if (!extended)
	{
	  /* One summary line per initialized pool. */
	  if (pool->okay)
	    log_info ("%-13s %u/%lu bytes in %u blocks\n",
		      pool == &mainpool? "secmem usage:":"",
		      pool->cur_alloced, (unsigned long)pool->size,
		      pool->cur_blocks);
	}
      else
	{
	  /* Walk the block list and print one line per block. */
	  for (i = 0, mb = (memblock_t *) pool->mem;
	       ptr_into_pool_p (pool, mb);
	       mb = mb_get_next (pool, mb), i++)
	    log_info ("SECMEM: pool %d %s block %i size %i\n",
		      poolno,
		      (mb->flags & MB_FLAG_ACTIVE) ? "used" : "free",
		      i,
		      mb->size);
	}
    }
  SECMEM_UNLOCK;
}
|