| File: | obj-scan-build/usermux/../../usermux/mux.c |
| Location: | line 265, column 5 |
| Description: | Null pointer passed as an argument to a 'nonnull' parameter |
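
The flagged call is at line 265 of the listing below, in `netfs_get_dirents`: `bcopy (cached_data, *data, bytes_left)`. The analyzer appears to reason that the static `cached_data` pointer, which starts out null and is only assigned inside the cache-refresh branch, can still be null on a path where that branch is skipped, so a null pointer reaches `bcopy`, whose pointer parameters are declared nonnull. On that path `bytes_left` is also zero, so no bytes would actually be copied at run time (and the zero-length `mmap` would likely fail first), but a null argument bound to a nonnull parameter is reported regardless of the length. A possible guard is sketched after the `netfs_get_dirents` listing below.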
```
  1  /* Root usermux node
  2  
  3     Copyright (C) 1997, 1998, 1999, 2000, 2002, 2008
  4       Free Software Foundation, Inc.
  5     Written by Miles Bader <miles@gnu.org>
  6     This file is part of the GNU Hurd.
  7  
  8     The GNU Hurd is free software; you can redistribute it and/or
  9     modify it under the terms of the GNU General Public License as
 10     published by the Free Software Foundation; either version 2, or (at
 11     your option) any later version.
 12  
 13     The GNU Hurd is distributed in the hope that it will be useful, but
 14     WITHOUT ANY WARRANTY; without even the implied warranty of
 15     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 16     General Public License for more details.
 17  
 18     You should have received a copy of the GNU General Public License
 19     along with this program; if not, write to the Free Software
 20     Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
 21  
 22  #include <stddef.h>
 23  #include <string.h>
 24  #include <dirent.h>
 25  #include <pwd.h>
 26  #include <sys/mman.h>
 27  
 28  #include "usermux.h"
 29  
 30  /* The granularity with which we allocate space to return our result.  */
 31  #define DIRENTS_CHUNK_SIZE  (128*1024)  /* Enough for perhaps 8000 names.  */
 32  
 33  /* The number of seconds we cache our directory return value.  */
 34  #define DIRENTS_CACHE_TIME  90
 35  
 36  /* Returned directory entries are aligned to blocks this many bytes long.
 37     Must be a power of two.  */
 38  #define DIRENT_ALIGN  4
 39  #define DIRENT_NAME_OFFS  offsetof (struct dirent, d_name)
 40  
 41  /* Length is structure before the name + the name + '\0', all
 42     padded to a four-byte alignment.  */
 43  #define DIRENT_LEN(name_len)                                                  \
 44    ((DIRENT_NAME_OFFS + (name_len) + 1 + (DIRENT_ALIGN - 1))                   \
 45     & ~(DIRENT_ALIGN - 1))
 46  
 47  static error_t lookup_user (struct usermux *mux, const char *user,
 48                              struct node **node); /* fwd decl */
 49  
 50  /* [root] Directory operations.  */
 51  
 52  /* Lookup NAME in DIR for USER; set *NODE to the found name upon return.  If
 53     the name was not found, then return ENOENT.  On any error, clear *NODE.
 54     (*NODE, if found, should be locked, this call should unlock DIR no matter
 55     what.)  */
 56  error_t
 57  netfs_attempt_lookup (struct iouser *user, struct node *dir,
 58                        char *name, struct node **node)
 59  {
 60    error_t err;
 61  
 62    if (dir->nn->name)
 63      err = ENOTDIR;
 64    else
 65      err = lookup_user (dir->nn->mux, name, node);
 66  
 67    fshelp_touch (&dir->nn_stat, TOUCH_ATIME, usermux_maptime);
 68  
 69    pthread_mutex_unlock (&dir->lock);
 70  
 71    if (! err)
 72      pthread_mutex_lock (&(*node)->lock);
 73  
 74    return err;
 75  }
 76  
 77  /* Fetch a directory of user entries, as for netfs_get_dirents (that function
 78     is actually a wrapper that caches the results for a while).  */
 79  static error_t
 80  get_dirents (struct node *dir,
 81               int first_entry, int max_entries, char **data,
 82               mach_msg_type_number_t *data_len,
 83               vm_size_t max_data_len, int *data_entries)
 84  {
 85    error_t err = 0;
 86  
 87    if (dir->nn->name)
 88      return ENOTDIR;
 89  
 90    /* Start scanning.  */
 91    setpwent ();
 92  
 93    /* Find the first entry.  */
 94    while (first_entry-- > 0)
 95      if (! getpwent ())
 96        {
 97          max_entries = 0;
 98          break;
 99        }
100  
101    if (max_entries != 0)
102      {
103        size_t size = (max_data_len == 0 ? DIRENTS_CHUNK_SIZE : max_data_len);
104  
105        *data = mmap (0, size, PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
106        err = (*data == (void *) -1) ? errno : 0;
107  
108        if (! err)
109          {
110            struct passwd *pw;
111            char *p = *data;
112            int count = 0;
113            int entry_type =
114              (S_ISLNK (dir->nn->mux->stat_template.st_mode) ? DT_LNK : DT_REG);
115  
116            /* See how much space we need for the result.  */
117            while ((max_entries == -1 || count < max_entries)
118                   && (pw = getpwent ()))
119              {
120                struct dirent hdr;
121                size_t name_len = strlen (pw->pw_name);
122                size_t sz = DIRENT_LEN (name_len);
123  
124                if ((p - *data) + sz > size)
125                  {
126                    if (max_data_len > 0)
127                      break;
128                    else
129                      /* Try to grow our return buffer.  */
130                      {
131                        vm_address_t extension = (vm_address_t)(*data + size);
132                        err = vm_allocate (mach_task_self (), &extension,
133                                           DIRENTS_CHUNK_SIZE, 0);
134                        if (err)
135                          break;
136                        size += DIRENTS_CHUNK_SIZE;
137                      }
138                  }
139  
140                hdr.d_namlen = name_len;
141                hdr.d_fileno = pw->pw_uid + USERMUX_FILENO_UID_OFFSET;
142                hdr.d_reclen = sz;
143                hdr.d_type = entry_type;
144  
145                memcpy (p, &hdr, DIRENT_NAME_OFFS);
146                strcpy (p + DIRENT_NAME_OFFS, pw->pw_name);
147                p += sz;
148  
149                count++;
150              }
151  
152            if (err)
153              munmap (*data, size);
154            else
155              {
156                vm_address_t alloc_end = (vm_address_t)(*data + size);
157                vm_address_t real_end = round_page (p);
158                if (alloc_end > real_end)
159                  munmap ((caddr_t) real_end, alloc_end - real_end);
160                *data_len = p - *data;
161                *data_entries = count;
162              }
163          }
164      }
165  
166    endpwent ();
167  
168    return err;
169  }
170  
171  /* Implement the netfs_get_dirents callback as described in
172     <hurd/netfs.h>.  */
173  error_t
174  netfs_get_dirents (struct iouser *cred, struct node *dir,
175                     int first_entry, int max_entries, char **data,
176                     mach_msg_type_number_t *data_len,
177                     vm_size_t max_data_len, int *data_entries)
178  {
179    error_t err;
180    static time_t cache_timestamp = 0;
181    static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
182    static char *cached_data = 0;
183    static mach_msg_type_number_t cached_data_len = 0;
184    static int cached_data_entries = 0;
185    struct timeval tv;
186    char *first;
187    size_t bytes_left, entries_left;
188  
189    maptime_read (usermux_maptime, &tv);
190    if (tv.tv_sec > cache_timestamp + DIRENTS_CACHE_TIME)
191      {
192        pthread_rwlock_wrlock (&cache_lock);
193  
194        if (cached_data_len > 0)
195          /* Free the old cache.  */
196          {
197            munmap (cached_data, cached_data_len);
198            cached_data = 0;
199            cached_data_len = 0;
200          }
201  
202        err = get_dirents (dir, 0, -1, &cached_data, &cached_data_len, 0,
203                           &cached_data_entries);
204  
205        if (! err)
206          cache_timestamp = tv.tv_sec;
207  
208        pthread_rwlock_unlock (&cache_lock);
209  
210        if (err)
211          return err;
212      }
213  
214    pthread_rwlock_rdlock (&cache_lock);
215  
216    first = cached_data;
217    bytes_left = cached_data_len;
218    entries_left = cached_data_entries;
219  
220    while (first_entry-- > 0)
221      {
222        struct dirent *e = (struct dirent *)first;
223  
224        if (entries_left == 0)
225          {
226            pthread_rwlock_unlock (&cache_lock);
227            return EINVAL;
228          }
229  
230        first += e->d_reclen;
231        bytes_left -= e->d_reclen;
232        entries_left--;
233      }
234  
235    if ((max_data_len > 0 && max_data_len < bytes_left)
236        || (max_entries > 0 && max_entries < entries_left))
237      /* If there's some limit on the return value, we can't just use our
238         values representing the whole cache, so we have to explicitly count
239         how much we're going to return.  */
240      {
241        char *lim = first;
242        int entries = 0;
243  
244        while (entries_left > 0
245               && max_entries > 0
246               && max_data_len > ((struct dirent *)lim)->d_reclen)
247          {
248            size_t reclen = ((struct dirent *)lim)->d_reclen;
249            max_data_len -= reclen;
250            max_entries--;
251            entries++;
252            lim += reclen;
253          }
254  
255        bytes_left = (lim - first);
256        entries_left = entries;
257      }
258  
259    *data_len = bytes_left;
260    *data_entries = entries_left;
261  
262    *data = mmap (0, bytes_left, PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
263    err = (*data == (void *) -1) ? errno : 0;
264    if (! err)
265      bcopy (cached_data, *data, bytes_left);
266  
267    pthread_rwlock_unlock (&cache_lock);
268  
269    fshelp_touch (&dir->nn_stat, TOUCH_ATIME, usermux_maptime);
270  
271    return err;
272  }
```
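
One way to quiet this report (a minimal sketch, not a change taken from the Hurd sources; the helper name `copy_if_any` is hypothetical) is to make the copy conditional, so a null `cached_data` is never passed to a parameter declared nonnull:

```c
#include <stddef.h>
#include <string.h>

/* Hypothetical helper: copy LEN bytes from SRC to DST, tolerating a null
   SRC when LEN is zero so that no null pointer is ever handed to memcpy's
   nonnull parameters.  (Note that memcpy takes the destination first, the
   reverse of bcopy's argument order.)  */
static void
copy_if_any (void *dst, const void *src, size_t len)
{
  if (len > 0 && src != NULL)
    memcpy (dst, src, len);
}
```

With such a helper, the call flagged at line 265 would become `copy_if_any (*data, cached_data, bytes_left);`; an equivalent in-place fix is to keep the existing `bcopy` call but execute it only when `bytes_left > 0`. The remainder of the file, covering user lookup and the attribute-change callbacks, continues below.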
```
273  
274  /* User lookup.  */
275  
276  /* Free the storage consumed by the user mux name NM, but not the node
277     it points to.  */
278  static void
279  free_name (struct usermux_name *nm)
280  {
281    free ((char *)nm->name);
282    free (nm);
283  }
284  
285  /* See if there's an existing entry for the name USER, and if so, return its
286     node in NODE with an additional reference.  True is returned iff the
287     lookup succeeds.  If PURGE is true, then any names with a null node are
288     removed.  */
289  static int
290  lookup_cached (struct usermux *mux, const char *user, int purge,
291                 struct node **node)
292  {
293    struct usermux_name *nm = mux->names, **prevl = &mux->names;
294  
295    while (nm)
296      {
297        struct usermux_name *next = nm->next;
298  
299        if (strcasecmp (user, nm->name) == 0)
300          {
301            pthread_spin_lock (&netfs_node_refcnt_lock);
302            if (nm->node)
303              nm->node->references++;
304            pthread_spin_unlock (&netfs_node_refcnt_lock);
305  
306            if (nm->node)
307              {
308                *node = nm->node;
309                return 1;
310              }
311          }
312  
313        if (purge && !nm->node)
314          {
315            *prevl = nm->next;
316            free_name (nm);
317          }
318        else
319          prevl = &nm->next;
320  
321        nm = next;
322      }
323  
324    return 0;
325  }
326  
327  /* See if there's an existing entry for the name USER, and if so, return its
328     node in NODE, with an additional reference, otherwise, create a new node
329     for the user PW as referred to by USER, and return that instead, with a
330     single reference.  The type of node created is either a translator node,
331     if USER refers to the official name of the user, or a symlink node to the
332     official name, if it doesn't.  */
333  static error_t
334  lookup_pwent (struct usermux *mux, const char *user, struct passwd *pw,
335                struct node **node)
336  {
337    error_t err;
338    struct usermux_name *nm = malloc (sizeof (struct usermux_name));
339  
340    if (! nm)
341      return ENOMEM;
342  
343    nm->name = strdup (user);
344    err = create_user_node (mux, nm, pw, node);
345    if (err)
346      {
347        free_name (nm);
348        return err;
349      }
350  
351    pthread_rwlock_wrlock (&mux->names_lock);
352    if (lookup_cached (mux, user, 1, node))
353      /* An entry for USER has already been created between the time we last
354         looked and now (which is possible because we didn't lock MUX).
355         Just throw away our version and return the one already in the cache.  */
356      {
357        pthread_rwlock_unlock (&mux->names_lock);
358        nm->node->nn->name = 0;  /* Avoid touching the mux name list.  */
359        netfs_nrele (nm->node);  /* Free the tentative new node.  */
360        free_name (nm);          /* And the name it was under.  */
361      }
362    else
363      /* Enter NM into MUX's list of names, and return the new node.  */
364      {
365        nm->next = mux->names;
366        mux->names = nm;
367        pthread_rwlock_unlock (&mux->names_lock);
368      }
369  
370    return 0;
371  }
372  
373  /* Lookup the user USER in MUX, and return the resulting node in NODE, with
374     an additional reference, or an error.  */
375  static error_t
376  lookup_user (struct usermux *mux, const char *user, struct node **node)
377  {
378    int was_cached;
379    struct passwd _pw, *pw;
380    char pwent_data[2048];  /* XXX what size should this be????  */
381  
382    pthread_rwlock_rdlock (&mux->names_lock);
383    was_cached = lookup_cached (mux, user, 0, node);
384    pthread_rwlock_unlock (&mux->names_lock);
385  
386    if (was_cached)
387      return 0;
388    else
389      {
390        if (getpwnam_r (user, &_pw, pwent_data, sizeof pwent_data, &pw))
391          return ENOENT;
392        if (pw == NULL)
393          return ENOENT;
394        return lookup_pwent (mux, user, pw, node);
395      }
396  }
397  
398  /* This should sync the entire remote filesystem.  If WAIT is set, return
399     only after sync is completely finished.  */
400  error_t
401  netfs_attempt_syncfs (struct iouser *cred, int wait)
402  {
403    return 0;
404  }
405  
406  /* This should attempt a chown call for the user specified by CRED on node
407     NODE, to change the owner to UID and the group to GID.  */
408  error_t
409  netfs_attempt_chown (struct iouser *cred, struct node *node, uid_t uid, uid_t gid)
410  {
411    if (node->nn->name)
412      return EOPNOTSUPP;
413    else
414      {
415        struct usermux *mux = node->nn->mux;
416        error_t err = file_chown (mux->underlying, uid, gid);
417  
418        if (! err)
419          {
420            struct usermux_name *nm;
421  
422            /* Change NODE's owner.  */
423            mux->stat_template.st_uid = uid;
424            mux->stat_template.st_gid = gid;
425            node->nn_stat.st_uid = uid;
426            node->nn_stat.st_gid = gid;
427  
428            /* Change the owner of each leaf node.  */
429            pthread_rwlock_rdlock (&mux->names_lock);
430            for (nm = mux->names; nm; nm = nm->next)
431              if (nm->node)
432                {
433                  nm->node->nn_stat.st_uid = uid;
434                  nm->node->nn_stat.st_gid = gid;
435                }
436            pthread_rwlock_unlock (&mux->names_lock);
437  
438            fshelp_touch (&node->nn_stat, TOUCH_CTIME, usermux_maptime);
439          }
440  
441        return err;
442      }
443  }
444  
445  /* This should attempt a chauthor call for the user specified by CRED on node
446     NODE, to change the author to AUTHOR.  */
447  error_t
448  netfs_attempt_chauthor (struct iouser *cred, struct node *node, uid_t author)
449  {
450    if (node->nn->name)
451      return EOPNOTSUPP;
452    else
453      {
454        struct usermux *mux = node->nn->mux;
455        error_t err = file_chauthor (mux->underlying, author);
456  
457        if (! err)
458          {
459            struct usermux_name *nm;
460  
461            /* Change NODE's author.  */
462            mux->stat_template.st_author = author;
463            node->nn_stat.st_author = author;
464  
465            /* Change the author of each leaf node.  */
466            pthread_rwlock_rdlock (&mux->names_lock);
467            for (nm = mux->names; nm; nm = nm->next)
468              if (nm->node)
469                nm->node->nn_stat.st_author = author;
470            pthread_rwlock_unlock (&mux->names_lock);
471  
472            fshelp_touch (&node->nn_stat, TOUCH_CTIME, usermux_maptime);
473          }
474  
475        return err;
476      }
477  }
478  
479  /* This should attempt a chmod call for the user specified by CRED on node
480     NODE, to change the mode to MODE.  Unlike the normal Unix and Hurd meaning
481     of chmod, this function is also used to attempt to change files into other
482     types.  If such a transition is attempted which is impossible, then return
483     EOPNOTSUPP.  */
484  error_t
485  netfs_attempt_chmod (struct iouser *cred, struct node *node, mode_t mode)
486  {
487    mode &= ~S_ITRANS;
488    if ((mode & S_IFMT) == 0)
489      mode |= (node->nn_stat.st_mode & S_IFMT);
490    if (node->nn->name || ((mode & S_IFMT) != (node->nn_stat.st_mode & S_IFMT)))
491      return EOPNOTSUPP;
492    else
493      {
494        error_t err = file_chmod (node->nn->mux->underlying, mode & ~S_IFMT);
495        if (! err)
496          {
497            node->nn_stat.st_mode = mode;
498            fshelp_touch (&node->nn_stat, TOUCH_CTIME, usermux_maptime);
499          }
500        return err;
501      }
502  }
```