syscall.c (434471B)
1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include "qemu/memfd.h" 24 #include "qemu/queue.h" 25 #include <elf.h> 26 #include <endian.h> 27 #include <grp.h> 28 #include <sys/ipc.h> 29 #include <sys/msg.h> 30 #include <sys/wait.h> 31 #include <sys/mount.h> 32 #include <sys/file.h> 33 #include <sys/fsuid.h> 34 #include <sys/personality.h> 35 #include <sys/prctl.h> 36 #include <sys/resource.h> 37 #include <sys/swap.h> 38 #include <linux/capability.h> 39 #include <sched.h> 40 #include <sys/timex.h> 41 #include <sys/socket.h> 42 #include <linux/sockios.h> 43 #include <sys/un.h> 44 #include <sys/uio.h> 45 #include <poll.h> 46 #include <sys/times.h> 47 #include <sys/shm.h> 48 #include <sys/sem.h> 49 #include <sys/statfs.h> 50 #include <utime.h> 51 #include <sys/sysinfo.h> 52 #include <sys/signalfd.h> 53 //#include <sys/user.h> 54 #include <netinet/in.h> 55 #include <netinet/ip.h> 56 #include <netinet/tcp.h> 57 #include <netinet/udp.h> 58 #include <linux/wireless.h> 59 #include <linux/icmp.h> 60 #include <linux/icmpv6.h> 61 #include <linux/if_tun.h> 62 #include <linux/in6.h> 63 #include <linux/errqueue.h> 64 #include <linux/random.h> 65 #ifdef CONFIG_TIMERFD 66 #include 
<sys/timerfd.h> 67 #endif 68 #ifdef CONFIG_EVENTFD 69 #include <sys/eventfd.h> 70 #endif 71 #ifdef CONFIG_EPOLL 72 #include <sys/epoll.h> 73 #endif 74 #ifdef CONFIG_ATTR 75 #include "qemu/xattr.h" 76 #endif 77 #ifdef CONFIG_SENDFILE 78 #include <sys/sendfile.h> 79 #endif 80 #ifdef HAVE_SYS_KCOV_H 81 #include <sys/kcov.h> 82 #endif 83 84 #define termios host_termios 85 #define winsize host_winsize 86 #define termio host_termio 87 #define sgttyb host_sgttyb /* same as target */ 88 #define tchars host_tchars /* same as target */ 89 #define ltchars host_ltchars /* same as target */ 90 91 #include <linux/termios.h> 92 #include <linux/unistd.h> 93 #include <linux/cdrom.h> 94 #include <linux/hdreg.h> 95 #include <linux/soundcard.h> 96 #include <linux/kd.h> 97 #include <linux/mtio.h> 98 99 #ifdef HAVE_SYS_MOUNT_FSCONFIG 100 /* 101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h, 102 * which in turn prevents use of linux/fs.h. So we have to 103 * define the constants ourselves for now. 104 */ 105 #define FS_IOC_GETFLAGS _IOR('f', 1, long) 106 #define FS_IOC_SETFLAGS _IOW('f', 2, long) 107 #define FS_IOC_GETVERSION _IOR('v', 1, long) 108 #define FS_IOC_SETVERSION _IOW('v', 2, long) 109 #define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) 110 #define FS_IOC32_GETFLAGS _IOR('f', 1, int) 111 #define FS_IOC32_SETFLAGS _IOW('f', 2, int) 112 #define FS_IOC32_GETVERSION _IOR('v', 1, int) 113 #define FS_IOC32_SETVERSION _IOW('v', 2, int) 114 115 #define BLKGETSIZE64 _IOR(0x12,114,size_t) 116 #define BLKDISCARD _IO(0x12,119) 117 #define BLKIOMIN _IO(0x12,120) 118 #define BLKIOOPT _IO(0x12,121) 119 #define BLKALIGNOFF _IO(0x12,122) 120 #define BLKPBSZGET _IO(0x12,123) 121 #define BLKDISCARDZEROES _IO(0x12,124) 122 #define BLKSECDISCARD _IO(0x12,125) 123 #define BLKROTATIONAL _IO(0x12,126) 124 #define BLKZEROOUT _IO(0x12,127) 125 126 #define FIBMAP _IO(0x00,1) 127 #define FIGETBSZ _IO(0x00,2) 128 129 struct file_clone_range { 130 __s64 src_fd; 131 __u64 src_offset; 132 __u64 
src_length; 133 __u64 dest_offset; 134 }; 135 136 #define FICLONE _IOW(0x94, 9, int) 137 #define FICLONERANGE _IOW(0x94, 13, struct file_clone_range) 138 139 #else 140 #include <linux/fs.h> 141 #endif 142 #include <linux/fd.h> 143 #if defined(CONFIG_FIEMAP) 144 #include <linux/fiemap.h> 145 #endif 146 #include <linux/fb.h> 147 #if defined(CONFIG_USBFS) 148 #include <linux/usbdevice_fs.h> 149 #include <linux/usb/ch9.h> 150 #endif 151 #include <linux/vt.h> 152 #include <linux/dm-ioctl.h> 153 #include <linux/reboot.h> 154 #include <linux/route.h> 155 #include <linux/filter.h> 156 #include <linux/blkpg.h> 157 #include <netpacket/packet.h> 158 #include <linux/netlink.h> 159 #include <linux/if_alg.h> 160 #include <linux/rtc.h> 161 #include <sound/asound.h> 162 #ifdef HAVE_BTRFS_H 163 #include <linux/btrfs.h> 164 #endif 165 #ifdef HAVE_DRM_H 166 #include <libdrm/drm.h> 167 #include <libdrm/i915_drm.h> 168 #endif 169 #include <linux/binfmts.h> 170 #include "linux_loop.h" 171 #include "uname.h" 172 173 #include "qemu.h" 174 #include "user-internals.h" 175 #include "strace.h" 176 #include "signal-common.h" 177 #include "loader.h" 178 #include "user-mmap.h" 179 #include "user/safe-syscall.h" 180 #include "qemu/guest-random.h" 181 #include "qemu/selfmap.h" 182 #include "user/syscall-trace.h" 183 #include "special-errno.h" 184 #include "qapi/error.h" 185 #include "fd-trans.h" 186 #include "tcg/tcg.h" 187 #include "cpu_loop-common.h" 188 189 #ifndef CLONE_IO 190 #define CLONE_IO 0x80000000 /* Clone io context */ 191 #endif 192 193 /* We can't directly call the host clone syscall, because this will 194 * badly confuse libc (breaking mutexes, for example). 
So we must 195 * divide clone flags into: 196 * * flag combinations that look like pthread_create() 197 * * flag combinations that look like fork() 198 * * flags we can implement within QEMU itself 199 * * flags we can't support and will return an error for 200 */ 201 /* For thread creation, all these flags must be present; for 202 * fork, none must be present. 203 */ 204 #define CLONE_THREAD_FLAGS \ 205 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 206 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 207 208 /* These flags are ignored: 209 * CLONE_DETACHED is now ignored by the kernel; 210 * CLONE_IO is just an optimisation hint to the I/O scheduler 211 */ 212 #define CLONE_IGNORED_FLAGS \ 213 (CLONE_DETACHED | CLONE_IO) 214 215 /* Flags for fork which we can implement within QEMU itself */ 216 #define CLONE_OPTIONAL_FORK_FLAGS \ 217 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 218 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 219 220 /* Flags for thread creation which we can implement within QEMU itself */ 221 #define CLONE_OPTIONAL_THREAD_FLAGS \ 222 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 223 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 224 225 #define CLONE_INVALID_FORK_FLAGS \ 226 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 227 228 #define CLONE_INVALID_THREAD_FLAGS \ 229 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 230 CLONE_IGNORED_FLAGS)) 231 232 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 233 * have almost all been allocated. We cannot support any of 234 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 235 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 236 * The checks against the invalid thread masks above will catch these. 237 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 238 */ 239 240 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 241 * once. This exercises the codepaths for restart. 
242 */ 243 //#define DEBUG_ERESTARTSYS 244 245 //#include <linux/msdos_fs.h> 246 #define VFAT_IOCTL_READDIR_BOTH \ 247 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2) 248 #define VFAT_IOCTL_READDIR_SHORT \ 249 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2) 250 251 #undef _syscall0 252 #undef _syscall1 253 #undef _syscall2 254 #undef _syscall3 255 #undef _syscall4 256 #undef _syscall5 257 #undef _syscall6 258 259 #define _syscall0(type,name) \ 260 static type name (void) \ 261 { \ 262 return syscall(__NR_##name); \ 263 } 264 265 #define _syscall1(type,name,type1,arg1) \ 266 static type name (type1 arg1) \ 267 { \ 268 return syscall(__NR_##name, arg1); \ 269 } 270 271 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 272 static type name (type1 arg1,type2 arg2) \ 273 { \ 274 return syscall(__NR_##name, arg1, arg2); \ 275 } 276 277 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 278 static type name (type1 arg1,type2 arg2,type3 arg3) \ 279 { \ 280 return syscall(__NR_##name, arg1, arg2, arg3); \ 281 } 282 283 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 284 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 285 { \ 286 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 287 } 288 289 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 290 type5,arg5) \ 291 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 292 { \ 293 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 294 } 295 296 297 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 298 type5,arg5,type6,arg6) \ 299 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 300 type6 arg6) \ 301 { \ 302 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 303 } 304 305 306 #define __NR_sys_uname __NR_uname 307 #define __NR_sys_getcwd1 __NR_getcwd 308 #define __NR_sys_getdents __NR_getdents 309 #define 
__NR_sys_getdents64 __NR_getdents64 310 #define __NR_sys_getpriority __NR_getpriority 311 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 312 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 313 #define __NR_sys_syslog __NR_syslog 314 #if defined(__NR_futex) 315 # define __NR_sys_futex __NR_futex 316 #endif 317 #if defined(__NR_futex_time64) 318 # define __NR_sys_futex_time64 __NR_futex_time64 319 #endif 320 #define __NR_sys_statx __NR_statx 321 322 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 323 #define __NR__llseek __NR_lseek 324 #endif 325 326 /* Newer kernel ports have llseek() instead of _llseek() */ 327 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 328 #define TARGET_NR__llseek TARGET_NR_llseek 329 #endif 330 331 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */ 332 #ifndef TARGET_O_NONBLOCK_MASK 333 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK 334 #endif 335 336 #define __NR_sys_gettid __NR_gettid 337 _syscall0(int, sys_gettid) 338 339 /* For the 64-bit guest on 32-bit host case we must emulate 340 * getdents using getdents64, because otherwise the host 341 * might hand us back more dirent records than we can fit 342 * into the guest buffer after structure format conversion. 343 * Otherwise we emulate getdents with getdents if the host has it. 
344 */ 345 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS 346 #define EMULATE_GETDENTS_WITH_GETDENTS 347 #endif 348 349 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS) 350 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 351 #endif 352 #if (defined(TARGET_NR_getdents) && \ 353 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \ 354 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 355 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 356 #endif 357 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 358 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 359 loff_t *, res, uint, wh); 360 #endif 361 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 362 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 363 siginfo_t *, uinfo) 364 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 365 #ifdef __NR_exit_group 366 _syscall1(int,exit_group,int,error_code) 367 #endif 368 #if defined(__NR_close_range) && defined(TARGET_NR_close_range) 369 #define __NR_sys_close_range __NR_close_range 370 _syscall3(int,sys_close_range,int,first,int,last,int,flags) 371 #ifndef CLOSE_RANGE_CLOEXEC 372 #define CLOSE_RANGE_CLOEXEC (1U << 2) 373 #endif 374 #endif 375 #if defined(__NR_futex) 376 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 377 const struct timespec *,timeout,int *,uaddr2,int,val3) 378 #endif 379 #if defined(__NR_futex_time64) 380 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val, 381 const struct timespec *,timeout,int *,uaddr2,int,val3) 382 #endif 383 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) 384 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags); 385 #endif 386 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) 387 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info, 388 unsigned int, flags); 389 #endif 390 #if 
defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) 391 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags); 392 #endif 393 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 394 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 395 unsigned long *, user_mask_ptr); 396 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 397 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 398 unsigned long *, user_mask_ptr); 399 /* sched_attr is not defined in glibc */ 400 struct sched_attr { 401 uint32_t size; 402 uint32_t sched_policy; 403 uint64_t sched_flags; 404 int32_t sched_nice; 405 uint32_t sched_priority; 406 uint64_t sched_runtime; 407 uint64_t sched_deadline; 408 uint64_t sched_period; 409 uint32_t sched_util_min; 410 uint32_t sched_util_max; 411 }; 412 #define __NR_sys_sched_getattr __NR_sched_getattr 413 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr, 414 unsigned int, size, unsigned int, flags); 415 #define __NR_sys_sched_setattr __NR_sched_setattr 416 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr, 417 unsigned int, flags); 418 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler 419 _syscall1(int, sys_sched_getscheduler, pid_t, pid); 420 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler 421 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy, 422 const struct sched_param *, param); 423 #define __NR_sys_sched_getparam __NR_sched_getparam 424 _syscall2(int, sys_sched_getparam, pid_t, pid, 425 struct sched_param *, param); 426 #define __NR_sys_sched_setparam __NR_sched_setparam 427 _syscall2(int, sys_sched_setparam, pid_t, pid, 428 const struct sched_param *, param); 429 #define __NR_sys_getcpu __NR_getcpu 430 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 431 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 432 void *, arg); 433 _syscall2(int, capget, struct 
__user_cap_header_struct *, header, 434 struct __user_cap_data_struct *, data); 435 _syscall2(int, capset, struct __user_cap_header_struct *, header, 436 struct __user_cap_data_struct *, data); 437 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 438 _syscall2(int, ioprio_get, int, which, int, who) 439 #endif 440 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 441 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 442 #endif 443 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 444 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 445 #endif 446 447 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 448 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 449 unsigned long, idx1, unsigned long, idx2) 450 #endif 451 452 /* 453 * It is assumed that struct statx is architecture independent. 454 */ 455 #if defined(TARGET_NR_statx) && defined(__NR_statx) 456 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags, 457 unsigned int, mask, struct target_statx *, statxbuf) 458 #endif 459 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier) 460 _syscall2(int, membarrier, int, cmd, int, flags) 461 #endif 462 463 static const bitmask_transtbl fcntl_flags_tbl[] = { 464 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 465 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 466 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 467 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 468 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 469 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 470 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 471 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 472 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 473 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 474 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 475 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, 
O_DIRECTORY, }, 476 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 477 #if defined(O_DIRECT) 478 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 479 #endif 480 #if defined(O_NOATIME) 481 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 482 #endif 483 #if defined(O_CLOEXEC) 484 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 485 #endif 486 #if defined(O_PATH) 487 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 488 #endif 489 #if defined(O_TMPFILE) 490 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 491 #endif 492 /* Don't terminate the list prematurely on 64-bit host+guest. */ 493 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 494 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 495 #endif 496 { 0, 0, 0, 0 } 497 }; 498 499 _syscall2(int, sys_getcwd1, char *, buf, size_t, size) 500 501 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64) 502 #if defined(__NR_utimensat) 503 #define __NR_sys_utimensat __NR_utimensat 504 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 505 const struct timespec *,tsp,int,flags) 506 #else 507 static int sys_utimensat(int dirfd, const char *pathname, 508 const struct timespec times[2], int flags) 509 { 510 errno = ENOSYS; 511 return -1; 512 } 513 #endif 514 #endif /* TARGET_NR_utimensat */ 515 516 #ifdef TARGET_NR_renameat2 517 #if defined(__NR_renameat2) 518 #define __NR_sys_renameat2 __NR_renameat2 519 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd, 520 const char *, new, unsigned int, flags) 521 #else 522 static int sys_renameat2(int oldfd, const char *old, 523 int newfd, const char *new, int flags) 524 { 525 if (flags == 0) { 526 return renameat(oldfd, old, newfd, new); 527 } 528 errno = ENOSYS; 529 return -1; 530 } 531 #endif 532 #endif /* TARGET_NR_renameat2 */ 533 534 #ifdef CONFIG_INOTIFY 535 #include <sys/inotify.h> 536 #else 537 /* Userspace can usually survive runtime without 
inotify */ 538 #undef TARGET_NR_inotify_init 539 #undef TARGET_NR_inotify_init1 540 #undef TARGET_NR_inotify_add_watch 541 #undef TARGET_NR_inotify_rm_watch 542 #endif /* CONFIG_INOTIFY */ 543 544 #if defined(TARGET_NR_prlimit64) 545 #ifndef __NR_prlimit64 546 # define __NR_prlimit64 -1 547 #endif 548 #define __NR_sys_prlimit64 __NR_prlimit64 549 /* The glibc rlimit structure may not be that used by the underlying syscall */ 550 struct host_rlimit64 { 551 uint64_t rlim_cur; 552 uint64_t rlim_max; 553 }; 554 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 555 const struct host_rlimit64 *, new_limit, 556 struct host_rlimit64 *, old_limit) 557 #endif 558 559 560 #if defined(TARGET_NR_timer_create) 561 /* Maximum of 32 active POSIX timers allowed at any one time. */ 562 #define GUEST_TIMER_MAX 32 563 static timer_t g_posix_timers[GUEST_TIMER_MAX]; 564 static int g_posix_timer_allocated[GUEST_TIMER_MAX]; 565 566 static inline int next_free_host_timer(void) 567 { 568 int k; 569 for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) { 570 if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) { 571 return k; 572 } 573 } 574 return -1; 575 } 576 577 static inline void free_host_timer_slot(int id) 578 { 579 qatomic_store_release(g_posix_timer_allocated + id, 0); 580 } 581 #endif 582 583 static inline int host_to_target_errno(int host_errno) 584 { 585 switch (host_errno) { 586 #define E(X) case X: return TARGET_##X; 587 #include "errnos.c.inc" 588 #undef E 589 default: 590 return host_errno; 591 } 592 } 593 594 static inline int target_to_host_errno(int target_errno) 595 { 596 switch (target_errno) { 597 #define E(X) case TARGET_##X: return X; 598 #include "errnos.c.inc" 599 #undef E 600 default: 601 return target_errno; 602 } 603 } 604 605 abi_long get_errno(abi_long ret) 606 { 607 if (ret == -1) 608 return -host_to_target_errno(errno); 609 else 610 return ret; 611 } 612 613 const char *target_strerror(int err) 614 { 615 if (err == QEMU_ERESTARTSYS) { 616 return 
"To be restarted"; 617 } 618 if (err == QEMU_ESIGRETURN) { 619 return "Successful exit from sigreturn"; 620 } 621 622 return strerror(target_to_host_errno(err)); 623 } 624 625 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize) 626 { 627 int i; 628 uint8_t b; 629 if (usize <= ksize) { 630 return 1; 631 } 632 for (i = ksize; i < usize; i++) { 633 if (get_user_u8(b, addr + i)) { 634 return -TARGET_EFAULT; 635 } 636 if (b != 0) { 637 return 0; 638 } 639 } 640 return 1; 641 } 642 643 #define safe_syscall0(type, name) \ 644 static type safe_##name(void) \ 645 { \ 646 return safe_syscall(__NR_##name); \ 647 } 648 649 #define safe_syscall1(type, name, type1, arg1) \ 650 static type safe_##name(type1 arg1) \ 651 { \ 652 return safe_syscall(__NR_##name, arg1); \ 653 } 654 655 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \ 656 static type safe_##name(type1 arg1, type2 arg2) \ 657 { \ 658 return safe_syscall(__NR_##name, arg1, arg2); \ 659 } 660 661 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 662 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ 663 { \ 664 return safe_syscall(__NR_##name, arg1, arg2, arg3); \ 665 } 666 667 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 668 type4, arg4) \ 669 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 670 { \ 671 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 672 } 673 674 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 675 type4, arg4, type5, arg5) \ 676 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 677 type5 arg5) \ 678 { \ 679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 680 } 681 682 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 683 type4, arg4, type5, arg5, type6, arg6) \ 684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 685 type5 arg5, type6 arg6) \ 686 { \ 
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 688 } 689 690 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) 691 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) 692 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \ 693 int, flags, mode_t, mode) 694 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid) 695 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ 696 struct rusage *, rusage) 697 #endif 698 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \ 699 int, options, struct rusage *, rusage) 700 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp) 701 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \ 702 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 703 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \ 704 fd_set *, exceptfds, struct timespec *, timeout, void *, sig) 705 #endif 706 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64) 707 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds, 708 struct timespec *, tsp, const sigset_t *, sigmask, 709 size_t, sigsetsize) 710 #endif 711 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events, 712 int, maxevents, int, timeout, const sigset_t *, sigmask, 713 size_t, sigsetsize) 714 #if defined(__NR_futex) 715 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \ 716 const struct timespec *,timeout,int *,uaddr2,int,val3) 717 #endif 718 #if defined(__NR_futex_time64) 719 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \ 720 const struct timespec *,timeout,int *,uaddr2,int,val3) 721 #endif 722 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize) 723 safe_syscall2(int, kill, pid_t, pid, int, sig) 724 safe_syscall2(int, tkill, int, tid, int, sig) 725 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig) 726 
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt) 727 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt) 728 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt, 729 unsigned long, pos_l, unsigned long, pos_h) 730 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, 731 unsigned long, pos_l, unsigned long, pos_h) 732 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr, 733 socklen_t, addrlen) 734 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len, 735 int, flags, const struct sockaddr *, addr, socklen_t, addrlen) 736 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len, 737 int, flags, struct sockaddr *, addr, socklen_t *, addrlen) 738 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags) 739 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags) 740 safe_syscall2(int, flock, int, fd, int, operation) 741 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64) 742 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo, 743 const struct timespec *, uts, size_t, sigsetsize) 744 #endif 745 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len, 746 int, flags) 747 #if defined(TARGET_NR_nanosleep) 748 safe_syscall2(int, nanosleep, const struct timespec *, req, 749 struct timespec *, rem) 750 #endif 751 #if defined(TARGET_NR_clock_nanosleep) || \ 752 defined(TARGET_NR_clock_nanosleep_time64) 753 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags, 754 const struct timespec *, req, struct timespec *, rem) 755 #endif 756 #ifdef __NR_ipc 757 #ifdef __s390x__ 758 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third, 759 void *, ptr) 760 #else 761 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third, 762 void *, ptr, long, fifth) 763 
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/*
 * Translate a host socket type value (SOCK_DGRAM/SOCK_STREAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits) into the target's encoding.
 * The low nibble (0xf, the SOCK_TYPE_MASK) holds the type; any type we
 * don't explicitly map is passed through unchanged.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest brk state: current break, the break at startup (the floor below
 * which the guest may not shrink it), and the highest host page already
 * reserved for the guest heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called once at load time);
 * all three state variables start host-page-aligned. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the "query current break" idiom: report, don't change. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break (matches kernel brk()). */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.
         */
        /* Zero the newly exposed range so re-grown heap reads as fresh. */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.
     */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Unpack a guest fd_set (an array of abi_ulong bit words, target byte
 * order) at target_fds_addr covering fds [0, n) into the host fd_set.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* number of abi_ulong words needed to hold n bits */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/*
 * As copy_from_user_fdset(), but treats a NULL guest pointer as "no set":
 * *fds_ptr is pointed at fds when target_fds_addr is non-zero, else NULL.
 * NOTE(review): returns abi_ulong while the sibling helpers return
 * abi_long; callers assign the result to abi_long, so the -TARGET_EFAULT
 * still tests non-zero, but the type is inconsistent -- confirm intent.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/*
 * Pack the host fd_set back into the guest's abi_ulong bit-array
 * representation at target_fds_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif

/* Host clock tick rate used for clock_t scaling below. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/*
 * Copy a host struct rusage into guest memory at target_addr,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
/*
 * Convert a target rlimit value (target byte order) to the host rlim_t.
 * Values that are the target's infinity, or that don't fit in rlim_t,
 * both become RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* round-trip check: value truncated by rlim_t means "unrepresentable" */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the target representation (target byte order).
 * Host infinity, and any value not representable as a non-negative
 * abi_long, map to TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

/*
 * Map a target RLIMIT_* resource code to the host's constant.
 * Unknown codes are passed through unchanged and left for the host
 * syscall to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/*
 * Read a struct timeval from guest memory into *tv.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/*
 * Write *tv to guest memory as a target struct timeval.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * As copy_from_user_timeval() but for the 64-bit
 * target__kernel_sock_timeval layout used by *_time64 syscalls.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

/*
 * As copy_to_user_timeval() but for the 64-bit
 * target__kernel_sock_timeval layout.
 */
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/* NOTE(review): TARGET_NR_pselect6 is tested twice in the guard below;
 * the second test is redundant (possibly pselect6_time64 was intended,
 * though that path uses target_to_host_timespec64) -- confirm upstream. */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a target struct timespec from guest memory into *host_ts.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a target __kernel_timespec (64-bit time_t layout) from guest
 * memory into *host_ts.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

/*
 * Write *host_ts to guest memory as a target struct timespec.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/*
 * Write *host_ts to guest memory as a target __kernel_timespec.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
/*
 * Write *tz to guest memory as a target struct timezone.
 * Returns 0 or -TARGET_EFAULT.
 * NOTE(review): the lock_user_struct() copy-in flag is 1 here although
 * this is a VERIFY_WRITE lock (the other copy_to_* helpers pass 0) --
 * harmless but inconsistent; confirm intent.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
/*
 * Read a target struct timezone from guest memory into *tz.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/*
 * Read a target struct mq_attr from guest memory into *attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/*
 * Write *attr to guest memory as a target struct mq_attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 */
/*
 * Emulate select(): unpack the three guest fd_sets and optional timeout,
 * issue the host pselect6 (timeval converted to timespec, no sigmask),
 * then copy the surviving sets and remaining time back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* write back the time remaining, as Linux select() does */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Emulate the old single-argument select(), where arg1 points at a
 * struct holding all five select() parameters; unpack it and forward
 * to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return
           do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6/pselect6_time64.  time64 selects which timespec
 * layout the guest uses for arg5.  arg6 points at a two-word guest
 * structure packing {sigset address, sigset size}.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* install the temporary guest sigmask for the duration */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        /* write back the remaining time in the layout the guest used */
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Emulate poll/ppoll/ppoll_time64 on top of the host ppoll.
 * 'ppoll' selects ppoll semantics (timespec + sigmask in arg3-arg5)
 * versus poll semantics (millisecond timeout in arg3); 'time64'
 * selects the 64-bit timespec layout.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd =
          NULL;
    target_pfd = NULL;
    if (nfds) {
        /* reject sizes that would overflow the lock_user length */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* install the temporary guest sigmask for the duration */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            /* NOTE(review): these EFAULT returns skip the
             * unlock_user(target_pfd, ...) done on the other error
             * paths above -- confirm whether that lock is leaked here. */
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* copy the returned events back to the guest pollfd array */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents =
                tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

/*
 * Emulate pipe/pipe2: create a host pipe pair and hand the two fds to
 * the guest, honouring the per-target ABI quirks of the original pipe
 * syscall (some targets return the second fd in a register instead of
 * writing both through pipedes).
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/*
 * Read a guest ip_mreq/ip_mreqn into the host struct ip_mreqn.
 * The addresses are network byte order already; only imr_ifindex is
 * present (and swapped) when the guest passed the full ip_mreqn size.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/*
 * Convert a guest sockaddr at target_addr into a host struct sockaddr,
 * fixing byte order of the family field and of family-specific members,
 * with special handling for AF_UNIX path termination.  Per-fd
 * translators (fd_trans_target_to_host_addr) take precedence when
 * registered.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* last byte non-NUL but a NUL follows: include it in len */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        /* NOTE(review): the host buffer is viewed through the *target*
         * sockaddr_ll layout here; presumably the two layouts agree for
         * these fields -- confirm. */
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/*
 * Copy a host sockaddr out to guest memory, swapping the family field
 * and the byte-order-sensitive members of AF_NETLINK / AF_PACKET /
 * AF_INET6 addresses.  A zero len is a successful no-op.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* only swap the family field if the copied length actually covers it */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): unlike the netlink/in6 branches there is no len
         * check here, and the host sockaddr_ll layout is used on the
         * target buffer -- confirm both are intentional. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        /* NOTE(review): sin6_scope_id is conventionally a 32-bit field,
         * yet a 16-bit swap is applied here -- confirm against the
         * target_sockaddr_in6 definition. */
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/*
 * Convert the guest's ancillary-data (cmsg) chain in target_msgh into
 * host form in msgh, translating SCM_RIGHTS fd arrays and
 * SCM_CREDENTIALS; other payload types are byte-copied with a warning.
 * On exit msgh->msg_controllen holds the space actually used.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length as declared by the guest header */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
1831 if (space > msgh->msg_controllen) { 1832 space -= CMSG_SPACE(len); 1833 /* This is a QEMU bug, since we allocated the payload 1834 * area ourselves (unlike overflow in host-to-target 1835 * conversion, which is just the guest giving us a buffer 1836 * that's too small). It can't happen for the payload types 1837 * we currently support; if it becomes an issue in future 1838 * we would need to improve our allocation strategy to 1839 * something more intelligent than "twice the size of the 1840 * target buffer we're reading from". 1841 */ 1842 qemu_log_mask(LOG_UNIMP, 1843 ("Unsupported ancillary data %d/%d: " 1844 "unhandled msg size\n"), 1845 tswap32(target_cmsg->cmsg_level), 1846 tswap32(target_cmsg->cmsg_type)); 1847 break; 1848 } 1849 1850 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1851 cmsg->cmsg_level = SOL_SOCKET; 1852 } else { 1853 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1854 } 1855 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1856 cmsg->cmsg_len = CMSG_LEN(len); 1857 1858 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1859 int *fd = (int *)data; 1860 int *target_fd = (int *)target_data; 1861 int i, numfds = len / sizeof(int); 1862 1863 for (i = 0; i < numfds; i++) { 1864 __get_user(fd[i], target_fd + i); 1865 } 1866 } else if (cmsg->cmsg_level == SOL_SOCKET 1867 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1868 struct ucred *cred = (struct ucred *)data; 1869 struct target_ucred *target_cred = 1870 (struct target_ucred *)target_data; 1871 1872 __get_user(cred->pid, &target_cred->pid); 1873 __get_user(cred->uid, &target_cred->uid); 1874 __get_user(cred->gid, &target_cred->gid); 1875 } else { 1876 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 1877 cmsg->cmsg_level, cmsg->cmsg_type); 1878 memcpy(data, target_data, len); 1879 } 1880 1881 cmsg = CMSG_NXTHDR(msgh, cmsg); 1882 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1883 target_cmsg_start); 1884 } 1885 
unlock_user(target_cmsg, target_cmsg_addr, 0); 1886 the_end: 1887 msgh->msg_controllen = space; 1888 return 0; 1889 } 1890 1891 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1892 struct msghdr *msgh) 1893 { 1894 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1895 abi_long msg_controllen; 1896 abi_ulong target_cmsg_addr; 1897 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1898 socklen_t space = 0; 1899 1900 msg_controllen = tswapal(target_msgh->msg_controllen); 1901 if (msg_controllen < sizeof (struct target_cmsghdr)) 1902 goto the_end; 1903 target_cmsg_addr = tswapal(target_msgh->msg_control); 1904 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1905 target_cmsg_start = target_cmsg; 1906 if (!target_cmsg) 1907 return -TARGET_EFAULT; 1908 1909 while (cmsg && target_cmsg) { 1910 void *data = CMSG_DATA(cmsg); 1911 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1912 1913 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1914 int tgt_len, tgt_space; 1915 1916 /* We never copy a half-header but may copy half-data; 1917 * this is Linux's behaviour in put_cmsg(). Note that 1918 * truncation here is a guest problem (which we report 1919 * to the guest via the CTRUNC bit), unlike truncation 1920 * in target_to_host_cmsg, which is a QEMU bug. 1921 */ 1922 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1923 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1924 break; 1925 } 1926 1927 if (cmsg->cmsg_level == SOL_SOCKET) { 1928 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1929 } else { 1930 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1931 } 1932 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1933 1934 /* Payload types which need a different size of payload on 1935 * the target must adjust tgt_len here. 
1936 */ 1937 tgt_len = len; 1938 switch (cmsg->cmsg_level) { 1939 case SOL_SOCKET: 1940 switch (cmsg->cmsg_type) { 1941 case SO_TIMESTAMP: 1942 tgt_len = sizeof(struct target_timeval); 1943 break; 1944 default: 1945 break; 1946 } 1947 break; 1948 default: 1949 break; 1950 } 1951 1952 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1953 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1954 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1955 } 1956 1957 /* We must now copy-and-convert len bytes of payload 1958 * into tgt_len bytes of destination space. Bear in mind 1959 * that in both source and destination we may be dealing 1960 * with a truncated value! 1961 */ 1962 switch (cmsg->cmsg_level) { 1963 case SOL_SOCKET: 1964 switch (cmsg->cmsg_type) { 1965 case SCM_RIGHTS: 1966 { 1967 int *fd = (int *)data; 1968 int *target_fd = (int *)target_data; 1969 int i, numfds = tgt_len / sizeof(int); 1970 1971 for (i = 0; i < numfds; i++) { 1972 __put_user(fd[i], target_fd + i); 1973 } 1974 break; 1975 } 1976 case SO_TIMESTAMP: 1977 { 1978 struct timeval *tv = (struct timeval *)data; 1979 struct target_timeval *target_tv = 1980 (struct target_timeval *)target_data; 1981 1982 if (len != sizeof(struct timeval) || 1983 tgt_len != sizeof(struct target_timeval)) { 1984 goto unimplemented; 1985 } 1986 1987 /* copy struct timeval to target */ 1988 __put_user(tv->tv_sec, &target_tv->tv_sec); 1989 __put_user(tv->tv_usec, &target_tv->tv_usec); 1990 break; 1991 } 1992 case SCM_CREDENTIALS: 1993 { 1994 struct ucred *cred = (struct ucred *)data; 1995 struct target_ucred *target_cred = 1996 (struct target_ucred *)target_data; 1997 1998 __put_user(cred->pid, &target_cred->pid); 1999 __put_user(cred->uid, &target_cred->uid); 2000 __put_user(cred->gid, &target_cred->gid); 2001 break; 2002 } 2003 default: 2004 goto unimplemented; 2005 } 2006 break; 2007 2008 case SOL_IP: 2009 switch (cmsg->cmsg_type) { 2010 case IP_TTL: 2011 { 2012 uint32_t *v = (uint32_t *)data; 2013 uint32_t *t_int = 
(uint32_t *)target_data; 2014 2015 if (len != sizeof(uint32_t) || 2016 tgt_len != sizeof(uint32_t)) { 2017 goto unimplemented; 2018 } 2019 __put_user(*v, t_int); 2020 break; 2021 } 2022 case IP_RECVERR: 2023 { 2024 struct errhdr_t { 2025 struct sock_extended_err ee; 2026 struct sockaddr_in offender; 2027 }; 2028 struct errhdr_t *errh = (struct errhdr_t *)data; 2029 struct errhdr_t *target_errh = 2030 (struct errhdr_t *)target_data; 2031 2032 if (len != sizeof(struct errhdr_t) || 2033 tgt_len != sizeof(struct errhdr_t)) { 2034 goto unimplemented; 2035 } 2036 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2037 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2038 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2039 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2040 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2041 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2042 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2043 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2044 (void *) &errh->offender, sizeof(errh->offender)); 2045 break; 2046 } 2047 default: 2048 goto unimplemented; 2049 } 2050 break; 2051 2052 case SOL_IPV6: 2053 switch (cmsg->cmsg_type) { 2054 case IPV6_HOPLIMIT: 2055 { 2056 uint32_t *v = (uint32_t *)data; 2057 uint32_t *t_int = (uint32_t *)target_data; 2058 2059 if (len != sizeof(uint32_t) || 2060 tgt_len != sizeof(uint32_t)) { 2061 goto unimplemented; 2062 } 2063 __put_user(*v, t_int); 2064 break; 2065 } 2066 case IPV6_RECVERR: 2067 { 2068 struct errhdr6_t { 2069 struct sock_extended_err ee; 2070 struct sockaddr_in6 offender; 2071 }; 2072 struct errhdr6_t *errh = (struct errhdr6_t *)data; 2073 struct errhdr6_t *target_errh = 2074 (struct errhdr6_t *)target_data; 2075 2076 if (len != sizeof(struct errhdr6_t) || 2077 tgt_len != sizeof(struct errhdr6_t)) { 2078 goto unimplemented; 2079 } 2080 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2081 
__put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2082 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2083 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2084 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2085 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2086 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2087 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2088 (void *) &errh->offender, sizeof(errh->offender)); 2089 break; 2090 } 2091 default: 2092 goto unimplemented; 2093 } 2094 break; 2095 2096 default: 2097 unimplemented: 2098 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 2099 cmsg->cmsg_level, cmsg->cmsg_type); 2100 memcpy(target_data, data, MIN(len, tgt_len)); 2101 if (tgt_len > len) { 2102 memset(target_data + len, 0, tgt_len - len); 2103 } 2104 } 2105 2106 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 2107 tgt_space = TARGET_CMSG_SPACE(tgt_len); 2108 if (msg_controllen < tgt_space) { 2109 tgt_space = msg_controllen; 2110 } 2111 msg_controllen -= tgt_space; 2112 space += tgt_space; 2113 cmsg = CMSG_NXTHDR(msgh, cmsg); 2114 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 2115 target_cmsg_start); 2116 } 2117 unlock_user(target_cmsg, target_cmsg_addr, space); 2118 the_end: 2119 target_msgh->msg_controllen = tswapal(space); 2120 return 0; 2121 } 2122 2123 /* do_setsockopt() Must return target values and target errnos. */ 2124 static abi_long do_setsockopt(int sockfd, int level, int optname, 2125 abi_ulong optval_addr, socklen_t optlen) 2126 { 2127 abi_long ret; 2128 int val; 2129 struct ip_mreqn *ip_mreq; 2130 struct ip_mreq_source *ip_mreq_source; 2131 2132 switch(level) { 2133 case SOL_TCP: 2134 case SOL_UDP: 2135 /* TCP and UDP options all take an 'int' value. 
         */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        /* Options taking an int, which the guest may also pass as a
         * single byte (accepted like the kernel does). */
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or the full ip_mreqn form. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* ip_mreq_source is all network-order addresses: no swap. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        /* Options taking a plain int value. */
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is host-endian; the address is
             * network order and copied as-is. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is 8 x 32-bit words of bitmask. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            /* NOTE(review): g_malloc() aborts on OOM rather than
             * returning NULL, so this branch can presumably only be
             * taken for optlen == 0 — confirm intended behaviour. */
            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* Value is carried in optlen itself; no option buffer. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

            /* Shared with TARGET_SO_SNDTIMEO below. */
set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            /* Byte-swap each BPF instruction of the guest program. */
            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            /* Copy into a NUL-terminated IFNAMSIZ buffer; the guest
             * string may not be terminated. */
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        /* Common tail for the 'int' options mapped above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

            /* Shared with TARGET_SO_SNDTIMEO below. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* Security label is an opaque string: pass through. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.
         */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'lv = sizeof(lv)' relies on socklen_t and int
         * having the same size; 'sizeof(val)' would state the intent
         * directly — confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        /* Guest may ask for fewer bytes; write 1 byte or 4. */
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Short read requested and the value fits in a byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianess to target endianess. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): if put_user_u32() faults here we return
             * with 'results' still locked — confirm whether an unlock
             * is needed on this error path. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The shift is split into two half-width shifts so the shift count
     * never reaches the full width of the type (a full-width shift
     * would be undefined behaviour). */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}

/* Lock a guest iovec array into host memory.
 *
 * Reads 'count' target_iovec entries at 'target_addr' and builds a host
 * struct iovec array whose buffers are locked via lock_user(type, ..., copy).
 * Returns the host vector on success (release with unlock_iovec()), or
 * NULL with errno set on failure.  count == 0 yields NULL with errno == 0.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unlock every buffer locked so far; zero-length entries were
     * never locked. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

/* Release an iovec set up by lock_iovec(); 'copy' selects whether each
 * buffer's contents are written back to the guest on unlock. */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped processing at this entry. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

/* Convert a target SOCK_* type value (including the CLOEXEC/NONBLOCK
 * flag bits) to the host encoding.  Returns 0 on success, or
 * -TARGET_EINVAL if a requested flag cannot be represented on this host. */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.
 * On hosts lacking SOCK_NONBLOCK, apply O_NONBLOCK via fcntl();
 * on failure the fd is closed and -TARGET_EINVAL returned. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* do_socket() Must return target values and target errnos.
*/ 3182 static abi_long do_socket(int domain, int type, int protocol) 3183 { 3184 int target_type = type; 3185 int ret; 3186 3187 ret = target_to_host_sock_type(&type); 3188 if (ret) { 3189 return ret; 3190 } 3191 3192 if (domain == PF_NETLINK && !( 3193 #ifdef CONFIG_RTNETLINK 3194 protocol == NETLINK_ROUTE || 3195 #endif 3196 protocol == NETLINK_KOBJECT_UEVENT || 3197 protocol == NETLINK_AUDIT)) { 3198 return -TARGET_EPROTONOSUPPORT; 3199 } 3200 3201 if (domain == AF_PACKET || 3202 (domain == AF_INET && type == SOCK_PACKET)) { 3203 protocol = tswap16(protocol); 3204 } 3205 3206 ret = get_errno(socket(domain, type, protocol)); 3207 if (ret >= 0) { 3208 ret = sock_flags_fixup(ret, target_type); 3209 if (type == SOCK_PACKET) { 3210 /* Manage an obsolete case : 3211 * if socket type is SOCK_PACKET, bind by name 3212 */ 3213 fd_trans_register(ret, &target_packet_trans); 3214 } else if (domain == PF_NETLINK) { 3215 switch (protocol) { 3216 #ifdef CONFIG_RTNETLINK 3217 case NETLINK_ROUTE: 3218 fd_trans_register(ret, &target_netlink_route_trans); 3219 break; 3220 #endif 3221 case NETLINK_KOBJECT_UEVENT: 3222 /* nothing to do: messages are strings */ 3223 break; 3224 case NETLINK_AUDIT: 3225 fd_trans_register(ret, &target_netlink_audit_trans); 3226 break; 3227 default: 3228 g_assert_not_reached(); 3229 } 3230 } 3231 } 3232 return ret; 3233 } 3234 3235 /* do_bind() Must return target values and target errnos. */ 3236 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3237 socklen_t addrlen) 3238 { 3239 void *addr; 3240 abi_long ret; 3241 3242 if ((int)addrlen < 0) { 3243 return -TARGET_EINVAL; 3244 } 3245 3246 addr = alloca(addrlen+1); 3247 3248 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3249 if (ret) 3250 return ret; 3251 3252 return get_errno(bind(sockfd, addr, addrlen)); 3253 } 3254 3255 /* do_connect() Must return target values and target errnos. 
 */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 keeps the allocation non-empty even for addrlen == 0. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Shared implementation of sendmsg/recvmsg on an already-locked guest
 * msghdr; 'send' selects the direction. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Reserve double the guest's control buffer: host cmsg layouts may
     * need more room than the target's. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate on a private copy so the guest buffer itself is
             * left untouched. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success, return the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    /* Copy the struct back to the guest only for the receive path. */
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries actually processed are copied back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
*/ 3448 static abi_long do_accept4(int fd, abi_ulong target_addr, 3449 abi_ulong target_addrlen_addr, int flags) 3450 { 3451 socklen_t addrlen, ret_addrlen; 3452 void *addr; 3453 abi_long ret; 3454 int host_flags; 3455 3456 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3457 3458 if (target_addr == 0) { 3459 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3460 } 3461 3462 /* linux returns EFAULT if addrlen pointer is invalid */ 3463 if (get_user_u32(addrlen, target_addrlen_addr)) 3464 return -TARGET_EFAULT; 3465 3466 if ((int)addrlen < 0) { 3467 return -TARGET_EINVAL; 3468 } 3469 3470 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3471 return -TARGET_EFAULT; 3472 } 3473 3474 addr = alloca(addrlen); 3475 3476 ret_addrlen = addrlen; 3477 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags)); 3478 if (!is_error(ret)) { 3479 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3480 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3481 ret = -TARGET_EFAULT; 3482 } 3483 } 3484 return ret; 3485 } 3486 3487 /* do_getpeername() Must return target values and target errnos. 
*/ 3488 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3489 abi_ulong target_addrlen_addr) 3490 { 3491 socklen_t addrlen, ret_addrlen; 3492 void *addr; 3493 abi_long ret; 3494 3495 if (get_user_u32(addrlen, target_addrlen_addr)) 3496 return -TARGET_EFAULT; 3497 3498 if ((int)addrlen < 0) { 3499 return -TARGET_EINVAL; 3500 } 3501 3502 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3503 return -TARGET_EFAULT; 3504 } 3505 3506 addr = alloca(addrlen); 3507 3508 ret_addrlen = addrlen; 3509 ret = get_errno(getpeername(fd, addr, &ret_addrlen)); 3510 if (!is_error(ret)) { 3511 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3512 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3513 ret = -TARGET_EFAULT; 3514 } 3515 } 3516 return ret; 3517 } 3518 3519 /* do_getsockname() Must return target values and target errnos. */ 3520 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3521 abi_ulong target_addrlen_addr) 3522 { 3523 socklen_t addrlen, ret_addrlen; 3524 void *addr; 3525 abi_long ret; 3526 3527 if (get_user_u32(addrlen, target_addrlen_addr)) 3528 return -TARGET_EFAULT; 3529 3530 if ((int)addrlen < 0) { 3531 return -TARGET_EINVAL; 3532 } 3533 3534 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3535 return -TARGET_EFAULT; 3536 } 3537 3538 addr = alloca(addrlen); 3539 3540 ret_addrlen = addrlen; 3541 ret = get_errno(getsockname(fd, addr, &ret_addrlen)); 3542 if (!is_error(ret)) { 3543 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3544 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3545 ret = -TARGET_EFAULT; 3546 } 3547 } 3548 return ret; 3549 } 3550 3551 /* do_socketpair() Must return target values and target errnos. 
*/ 3552 static abi_long do_socketpair(int domain, int type, int protocol, 3553 abi_ulong target_tab_addr) 3554 { 3555 int tab[2]; 3556 abi_long ret; 3557 3558 target_to_host_sock_type(&type); 3559 3560 ret = get_errno(socketpair(domain, type, protocol, tab)); 3561 if (!is_error(ret)) { 3562 if (put_user_s32(tab[0], target_tab_addr) 3563 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3564 ret = -TARGET_EFAULT; 3565 } 3566 return ret; 3567 } 3568 3569 /* do_sendto() Must return target values and target errnos. */ 3570 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3571 abi_ulong target_addr, socklen_t addrlen) 3572 { 3573 void *addr; 3574 void *host_msg; 3575 void *copy_msg = NULL; 3576 abi_long ret; 3577 3578 if ((int)addrlen < 0) { 3579 return -TARGET_EINVAL; 3580 } 3581 3582 host_msg = lock_user(VERIFY_READ, msg, len, 1); 3583 if (!host_msg) 3584 return -TARGET_EFAULT; 3585 if (fd_trans_target_to_host_data(fd)) { 3586 copy_msg = host_msg; 3587 host_msg = g_malloc(len); 3588 memcpy(host_msg, copy_msg, len); 3589 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 3590 if (ret < 0) { 3591 goto fail; 3592 } 3593 } 3594 if (target_addr) { 3595 addr = alloca(addrlen+1); 3596 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 3597 if (ret) { 3598 goto fail; 3599 } 3600 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 3601 } else { 3602 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 3603 } 3604 fail: 3605 if (copy_msg) { 3606 g_free(host_msg); 3607 host_msg = copy_msg; 3608 } 3609 unlock_user(host_msg, msg, 0); 3610 return ret; 3611 } 3612 3613 /* do_recvfrom() Must return target values and target errnos. 
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        /* NULL receive buffer: pass it straight through to the host. */
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Never copy out more than the guest asked for. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif

#define N_SHM_REGIONS 32

/* Book-keeping for guest shmat() mappings. */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

/* Convert the guest ipc_perm embedded in a semid64_ds at target_addr
 * into host byte order. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* Some targets use a 32-bit mode/seq field rather than 16-bit. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd,
                       target_addr, 0);
    return 0;
}

/* Copy a host ipc_perm back into the guest semid64_ds at target_addr. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* Some targets use a 32-bit mode/seq field rather than 16-bit. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert a guest semid64_ds at target_addr into a host semid_ds. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Copy a host semid_ds back to the guest semid64_ds at target_addr. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO result). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo out to the guest target_seminfo at target_addr. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

/* Host view of the semctl() argument union. */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view: pointer members are guest addresses (abi_ulong). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

/* Read the guest semaphore value array (for SETALL) into *host_array.
 * Allocates *host_array; on success ownership passes to the caller,
 * which releases it via the matching host_to_target_semarray(). */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Query nsems for this semaphore set. */
    ret = semctl(semid, 0,
IPC_STAT, semun); 3941 if (ret == -1) 3942 return get_errno(ret); 3943 3944 nsems = semid_ds.sem_nsems; 3945 3946 *host_array = g_try_new(unsigned short, nsems); 3947 if (!*host_array) { 3948 return -TARGET_ENOMEM; 3949 } 3950 array = lock_user(VERIFY_READ, target_addr, 3951 nsems*sizeof(unsigned short), 1); 3952 if (!array) { 3953 g_free(*host_array); 3954 return -TARGET_EFAULT; 3955 } 3956 3957 for(i=0; i<nsems; i++) { 3958 __get_user((*host_array)[i], &array[i]); 3959 } 3960 unlock_user(array, target_addr, 0); 3961 3962 return 0; 3963 } 3964 3965 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 3966 unsigned short **host_array) 3967 { 3968 int nsems; 3969 unsigned short *array; 3970 union semun semun; 3971 struct semid_ds semid_ds; 3972 int i, ret; 3973 3974 semun.buf = &semid_ds; 3975 3976 ret = semctl(semid, 0, IPC_STAT, semun); 3977 if (ret == -1) 3978 return get_errno(ret); 3979 3980 nsems = semid_ds.sem_nsems; 3981 3982 array = lock_user(VERIFY_WRITE, target_addr, 3983 nsems*sizeof(unsigned short), 0); 3984 if (!array) 3985 return -TARGET_EFAULT; 3986 3987 for(i=0; i<nsems; i++) { 3988 __put_user((*host_array)[i], &array[i]); 3989 } 3990 g_free(*host_array); 3991 unlock_user(array, target_addr, 1); 3992 3993 return 0; 3994 } 3995 3996 static inline abi_long do_semctl(int semid, int semnum, int cmd, 3997 abi_ulong target_arg) 3998 { 3999 union target_semun target_su = { .buf = target_arg }; 4000 union semun arg; 4001 struct semid_ds dsarg; 4002 unsigned short *array = NULL; 4003 struct seminfo seminfo; 4004 abi_long ret = -TARGET_EINVAL; 4005 abi_long err; 4006 cmd &= 0xff; 4007 4008 switch( cmd ) { 4009 case GETVAL: 4010 case SETVAL: 4011 /* In 64 bit cross-endian situations, we will erroneously pick up 4012 * the wrong half of the union for the "val" element. To rectify 4013 * this, the entire 8-byte structure is byteswapped, followed by 4014 * a swap of the 4 byte val field. 
In other cases, the data is 4015 * already in proper host byte order. */ 4016 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 4017 target_su.buf = tswapal(target_su.buf); 4018 arg.val = tswap32(target_su.val); 4019 } else { 4020 arg.val = target_su.val; 4021 } 4022 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4023 break; 4024 case GETALL: 4025 case SETALL: 4026 err = target_to_host_semarray(semid, &array, target_su.array); 4027 if (err) 4028 return err; 4029 arg.array = array; 4030 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4031 err = host_to_target_semarray(semid, target_su.array, &array); 4032 if (err) 4033 return err; 4034 break; 4035 case IPC_STAT: 4036 case IPC_SET: 4037 case SEM_STAT: 4038 err = target_to_host_semid_ds(&dsarg, target_su.buf); 4039 if (err) 4040 return err; 4041 arg.buf = &dsarg; 4042 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4043 err = host_to_target_semid_ds(target_su.buf, &dsarg); 4044 if (err) 4045 return err; 4046 break; 4047 case IPC_INFO: 4048 case SEM_INFO: 4049 arg.__buf = &seminfo; 4050 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4051 err = host_to_target_seminfo(target_su.__buf, &seminfo); 4052 if (err) 4053 return err; 4054 break; 4055 case IPC_RMID: 4056 case GETPID: 4057 case GETNCNT: 4058 case GETZCNT: 4059 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 4060 break; 4061 } 4062 4063 return ret; 4064 } 4065 4066 struct target_sembuf { 4067 unsigned short sem_num; 4068 short sem_op; 4069 short sem_flg; 4070 }; 4071 4072 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 4073 abi_ulong target_addr, 4074 unsigned nsops) 4075 { 4076 struct target_sembuf *target_sembuf; 4077 int i; 4078 4079 target_sembuf = lock_user(VERIFY_READ, target_addr, 4080 nsops*sizeof(struct target_sembuf), 1); 4081 if (!target_sembuf) 4082 return -TARGET_EFAULT; 4083 4084 for(i=0; i<nsops; i++) { 4085 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 4086 
__get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 4087 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 4088 } 4089 4090 unlock_user(target_sembuf, target_addr, 0); 4091 4092 return 0; 4093 } 4094 4095 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \ 4096 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64) 4097 4098 /* 4099 * This macro is required to handle the s390 variants, which passes the 4100 * arguments in a different order than default. 4101 */ 4102 #ifdef __s390x__ 4103 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4104 (__nsops), (__timeout), (__sops) 4105 #else 4106 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4107 (__nsops), 0, (__sops), (__timeout) 4108 #endif 4109 4110 static inline abi_long do_semtimedop(int semid, 4111 abi_long ptr, 4112 unsigned nsops, 4113 abi_long timeout, bool time64) 4114 { 4115 struct sembuf *sops; 4116 struct timespec ts, *pts = NULL; 4117 abi_long ret; 4118 4119 if (timeout) { 4120 pts = &ts; 4121 if (time64) { 4122 if (target_to_host_timespec64(pts, timeout)) { 4123 return -TARGET_EFAULT; 4124 } 4125 } else { 4126 if (target_to_host_timespec(pts, timeout)) { 4127 return -TARGET_EFAULT; 4128 } 4129 } 4130 } 4131 4132 if (nsops > TARGET_SEMOPM) { 4133 return -TARGET_E2BIG; 4134 } 4135 4136 sops = g_new(struct sembuf, nsops); 4137 4138 if (target_to_host_sembuf(sops, ptr, nsops)) { 4139 g_free(sops); 4140 return -TARGET_EFAULT; 4141 } 4142 4143 ret = -TARGET_ENOSYS; 4144 #ifdef __NR_semtimedop 4145 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts)); 4146 #endif 4147 #ifdef __NR_ipc 4148 if (ret == -TARGET_ENOSYS) { 4149 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, 4150 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))); 4151 } 4152 #endif 4153 g_free(sops); 4154 return ret; 4155 } 4156 #endif 4157 4158 struct target_msqid_ds 4159 { 4160 struct target_ipc_perm msg_perm; 4161 abi_ulong msg_stime; 4162 #if TARGET_ABI_BITS == 32 4163 abi_ulong 
__unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/*
 * Decode a guest msqid_ds at target_addr into *host_md, byteswapping
 * every field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* msg_perm sits at offset 0 in both layouts, so the struct address
     * doubles as the perm address. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/*
 * Encode *host_md into the guest msqid_ds at target_addr (inverse of
 * target_to_host_msqid_ds).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct msginfo (returned by IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/*
 * Copy a host msginfo out to the guest at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/*
 * Emulate msgctl(2).  cmd is masked to strip target-specific version
 * bits (e.g. IPC_64) before dispatch.  Returns host result or
 * -TARGET_* errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel writes a struct msginfo through the msqid_ds pointer
         * for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr,
&msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Guest-ABI message buffer header; mtext is variable length. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (swapping mtype) and forward it, trying the dedicated syscall first
 * and the multiplexed sys_ipc second.  Returns host result or
 * -TARGET_* errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* s390 sys_ipc takes only five arguments. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

/*
 * Emulate msgrcv(2): receive into a host buffer, then copy mtext and a
 * byteswapped mtype back to the guest msgbuf at msgp.  On success ret
 * is the number of bytes received.  Returns host result or -TARGET_*
 * errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* mtext follows the abi_long mtype in the guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

/*
 * Decode a guest shmid_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
__get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/*
 * Encode *host_sd into the guest shmid_ds at target_addr (inverse of
 * target_to_host_shmid_ds).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct shminfo (returned by IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/*
 * Copy a host shminfo out to the guest at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct shm_info (returned by SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/*
 * Copy a host shm_info out to the guest at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/*
 * Emulate shmctl(2).  cmd is masked to strip target-specific version
 * bits before dispatch.  Returns host result or -TARGET_* errno.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a struct shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No data argument for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

/*
 * Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space, honouring the target's SHMLBA alignment, and record
 * the mapping in shm_regions[] so do_shmdt() can undo the page flags.
 * Returns the guest attach address or a -TARGET_* errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            /* SHM_RND: round the address down to SHMLBA. */
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.
*/ 4595 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba), false); 4596 4597 if (mmap_start == -1) { 4598 errno = ENOMEM; 4599 host_raddr = (void *)-1; 4600 } else 4601 host_raddr = shmat(shmid, g2h_untagged(mmap_start), 4602 shmflg | SHM_REMAP); 4603 } 4604 4605 if (host_raddr == (void *)-1) { 4606 mmap_unlock(); 4607 return get_errno((long)host_raddr); 4608 } 4609 raddr=h2g((unsigned long)host_raddr); 4610 4611 page_set_flags(raddr, raddr + shm_info.shm_segsz, 4612 PAGE_VALID | PAGE_RESET | PAGE_READ | 4613 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); 4614 4615 for (i = 0; i < N_SHM_REGIONS; i++) { 4616 if (!shm_regions[i].in_use) { 4617 shm_regions[i].in_use = true; 4618 shm_regions[i].start = raddr; 4619 shm_regions[i].size = shm_info.shm_segsz; 4620 break; 4621 } 4622 } 4623 4624 mmap_unlock(); 4625 return raddr; 4626 4627 } 4628 4629 static inline abi_long do_shmdt(abi_ulong shmaddr) 4630 { 4631 int i; 4632 abi_long rv; 4633 4634 /* shmdt pointers are always untagged */ 4635 4636 mmap_lock(); 4637 4638 for (i = 0; i < N_SHM_REGIONS; ++i) { 4639 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4640 shm_regions[i].in_use = false; 4641 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4642 break; 4643 } 4644 } 4645 rv = get_errno(shmdt(g2h_untagged(shmaddr))); 4646 4647 mmap_unlock(); 4648 4649 return rv; 4650 } 4651 4652 #ifdef TARGET_NR_ipc 4653 /* ??? This only works with linear mappings. */ 4654 /* do_ipc() must return target values and target errnos. 
*/
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* High 16 bits of 'call' carry the interface version. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
        /*
         * The s390 sys_ipc variant has only five parameters instead of six
         * (as for default variant) and the only difference is the handling of
         * SEMTIMEDOP where on s390 the third parameter is used as a pointer
         * to a struct timespec where the generic variant uses fifth parameter.
         */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes msgp/msgtyp via an indirection
                 * struct ("kludge") pointed to by ptr. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

#define STRUCT(name, ...)
STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit the thunk field-description array per struct. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/*
 * Handler for FS_IOC_FIEMAP: converts the guest struct fiemap plus its
 * trailing fiemap_extent array to host format, issues the ioctl, and
 * converts the (possibly enlarged) result back.  Returns host result
 * or -TARGET_* errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Clamp guest-supplied extent count so outbufsz cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
4839 */ 4840 fm = g_try_malloc(outbufsz); 4841 if (!fm) { 4842 return -TARGET_ENOMEM; 4843 } 4844 memcpy(fm, buf_temp, sizeof(struct fiemap)); 4845 free_fm = 1; 4846 } 4847 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm)); 4848 if (!is_error(ret)) { 4849 target_size_out = target_size_in; 4850 /* An extent_count of 0 means we were only counting the extents 4851 * so there are no structs to copy 4852 */ 4853 if (fm->fm_extent_count != 0) { 4854 target_size_out += fm->fm_mapped_extents * extent_size; 4855 } 4856 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 4857 if (!argptr) { 4858 ret = -TARGET_EFAULT; 4859 } else { 4860 /* Convert the struct fiemap */ 4861 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 4862 if (fm->fm_extent_count != 0) { 4863 p = argptr + target_size_in; 4864 /* ...and then all the struct fiemap_extents */ 4865 for (i = 0; i < fm->fm_mapped_extents; i++) { 4866 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 4867 THUNK_TARGET); 4868 p += extent_size; 4869 } 4870 } 4871 unlock_user(argptr, arg, target_size_out); 4872 } 4873 } 4874 if (free_fm) { 4875 g_free(fm); 4876 } 4877 return ret; 4878 } 4879 #endif 4880 4881 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 4882 int fd, int cmd, abi_long arg) 4883 { 4884 const argtype *arg_type = ie->arg_type; 4885 int target_size; 4886 void *argptr; 4887 int ret; 4888 struct ifconf *host_ifconf; 4889 uint32_t outbufsz; 4890 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 4891 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) }; 4892 int target_ifreq_size; 4893 int nb_ifreq; 4894 int free_buf = 0; 4895 int i; 4896 int target_ifc_len; 4897 abi_long target_ifc_buf; 4898 int host_ifc_len; 4899 char *host_ifc_buf; 4900 4901 assert(arg_type[0] == TYPE_PTR); 4902 assert(ie->access == IOC_RW); 4903 4904 arg_type++; 4905 target_size = thunk_type_size(arg_type, 0); 4906 4907 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 4908 if 
(!argptr) 4909 return -TARGET_EFAULT; 4910 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4911 unlock_user(argptr, arg, 0); 4912 4913 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 4914 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 4915 target_ifreq_size = thunk_type_size(ifreq_max_type, 0); 4916 4917 if (target_ifc_buf != 0) { 4918 target_ifc_len = host_ifconf->ifc_len; 4919 nb_ifreq = target_ifc_len / target_ifreq_size; 4920 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 4921 4922 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 4923 if (outbufsz > MAX_STRUCT_SIZE) { 4924 /* 4925 * We can't fit all the extents into the fixed size buffer. 4926 * Allocate one that is large enough and use it instead. 4927 */ 4928 host_ifconf = g_try_malloc(outbufsz); 4929 if (!host_ifconf) { 4930 return -TARGET_ENOMEM; 4931 } 4932 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 4933 free_buf = 1; 4934 } 4935 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf); 4936 4937 host_ifconf->ifc_len = host_ifc_len; 4938 } else { 4939 host_ifc_buf = NULL; 4940 } 4941 host_ifconf->ifc_buf = host_ifc_buf; 4942 4943 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 4944 if (!is_error(ret)) { 4945 /* convert host ifc_len to target ifc_len */ 4946 4947 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 4948 target_ifc_len = nb_ifreq * target_ifreq_size; 4949 host_ifconf->ifc_len = target_ifc_len; 4950 4951 /* restore target ifc_buf */ 4952 4953 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 4954 4955 /* copy struct ifconf to target user */ 4956 4957 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 4958 if (!argptr) 4959 return -TARGET_EFAULT; 4960 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 4961 unlock_user(argptr, arg, target_size); 4962 4963 if (target_ifc_buf != 0) { 4964 /* copy ifreq[] to target user */ 4965 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 4966 for (i = 0; i < 
nb_ifreq ; i++) { 4967 thunk_convert(argptr + i * target_ifreq_size, 4968 host_ifc_buf + i * sizeof(struct ifreq), 4969 ifreq_arg_type, THUNK_TARGET); 4970 } 4971 unlock_user(argptr, target_ifc_buf, target_ifc_len); 4972 } 4973 } 4974 4975 if (free_buf) { 4976 g_free(host_ifconf); 4977 } 4978 4979 return ret; 4980 } 4981 4982 #if defined(CONFIG_USBFS) 4983 #if HOST_LONG_BITS > 64 4984 #error USBDEVFS thunks do not support >64 bit hosts yet. 4985 #endif 4986 struct live_urb { 4987 uint64_t target_urb_adr; 4988 uint64_t target_buf_adr; 4989 char *target_buf_ptr; 4990 struct usbdevfs_urb host_urb; 4991 }; 4992 4993 static GHashTable *usbdevfs_urb_hashtable(void) 4994 { 4995 static GHashTable *urb_hashtable; 4996 4997 if (!urb_hashtable) { 4998 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal); 4999 } 5000 return urb_hashtable; 5001 } 5002 5003 static void urb_hashtable_insert(struct live_urb *urb) 5004 { 5005 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5006 g_hash_table_insert(urb_hashtable, urb, urb); 5007 } 5008 5009 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr) 5010 { 5011 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5012 return g_hash_table_lookup(urb_hashtable, &target_urb_adr); 5013 } 5014 5015 static void urb_hashtable_remove(struct live_urb *urb) 5016 { 5017 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5018 g_hash_table_remove(urb_hashtable, urb); 5019 } 5020 5021 static abi_long 5022 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp, 5023 int fd, int cmd, abi_long arg) 5024 { 5025 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) }; 5026 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 }; 5027 struct live_urb *lurb; 5028 void *argptr; 5029 uint64_t hurb; 5030 int target_size; 5031 uintptr_t target_urb_adr; 5032 abi_long ret; 5033 5034 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET); 5035 5036 memset(buf_temp, 0, sizeof(uint64_t)); 5037 ret 
= get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* The kernel returned a pointer to our host_urb; recover the
     * enclosing live_urb via offsetof. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

/*
 * Handler for USBDEVFS_DISCARDURB: cancel a previously submitted urb,
 * identified by its guest address.  The live_urb stays in the table so
 * the subsequent reap can clean it up.
 */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

/*
 * Handler for USBDEVFS_SUBMITURB: build a host urb from the guest one,
 * lock the guest data buffer for the duration of the transfer, and
 * register the urb for later reaping.  Returns host result or
 * -TARGET_* errno.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ?
VERIFY_WRITE : VERIFY_READ; 5136 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr, 5137 lurb->host_urb.buffer_length, 1); 5138 if (lurb->target_buf_ptr == NULL) { 5139 g_free(lurb); 5140 return -TARGET_EFAULT; 5141 } 5142 5143 /* update buffer pointer in host copy */ 5144 lurb->host_urb.buffer = lurb->target_buf_ptr; 5145 5146 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5147 if (is_error(ret)) { 5148 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0); 5149 g_free(lurb); 5150 } else { 5151 urb_hashtable_insert(lurb); 5152 } 5153 5154 return ret; 5155 } 5156 #endif /* CONFIG_USBFS */ 5157 5158 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5159 int cmd, abi_long arg) 5160 { 5161 void *argptr; 5162 struct dm_ioctl *host_dm; 5163 abi_long guest_data; 5164 uint32_t guest_data_size; 5165 int target_size; 5166 const argtype *arg_type = ie->arg_type; 5167 abi_long ret; 5168 void *big_buf = NULL; 5169 char *host_data; 5170 5171 arg_type++; 5172 target_size = thunk_type_size(arg_type, 0); 5173 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5174 if (!argptr) { 5175 ret = -TARGET_EFAULT; 5176 goto out; 5177 } 5178 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5179 unlock_user(argptr, arg, 0); 5180 5181 /* buf_temp is too small, so fetch things into a bigger buffer */ 5182 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5183 memcpy(big_buf, buf_temp, target_size); 5184 buf_temp = big_buf; 5185 host_dm = big_buf; 5186 5187 guest_data = arg + host_dm->data_start; 5188 if ((guest_data - arg) < 0) { 5189 ret = -TARGET_EINVAL; 5190 goto out; 5191 } 5192 guest_data_size = host_dm->data_size - host_dm->data_start; 5193 host_data = (char*)host_dm + host_dm->data_start; 5194 5195 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5196 if (!argptr) { 5197 ret = -TARGET_EFAULT; 5198 goto out; 5199 } 5200 5201 switch (ie->host_cmd) { 5202 case DM_REMOVE_ALL: 5203 case 
DM_LIST_DEVICES: 5204 case DM_DEV_CREATE: 5205 case DM_DEV_REMOVE: 5206 case DM_DEV_SUSPEND: 5207 case DM_DEV_STATUS: 5208 case DM_DEV_WAIT: 5209 case DM_TABLE_STATUS: 5210 case DM_TABLE_CLEAR: 5211 case DM_TABLE_DEPS: 5212 case DM_LIST_VERSIONS: 5213 /* no input data */ 5214 break; 5215 case DM_DEV_RENAME: 5216 case DM_DEV_SET_GEOMETRY: 5217 /* data contains only strings */ 5218 memcpy(host_data, argptr, guest_data_size); 5219 break; 5220 case DM_TARGET_MSG: 5221 memcpy(host_data, argptr, guest_data_size); 5222 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5223 break; 5224 case DM_TABLE_LOAD: 5225 { 5226 void *gspec = argptr; 5227 void *cur_data = host_data; 5228 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5229 int spec_size = thunk_type_size(arg_type, 0); 5230 int i; 5231 5232 for (i = 0; i < host_dm->target_count; i++) { 5233 struct dm_target_spec *spec = cur_data; 5234 uint32_t next; 5235 int slen; 5236 5237 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5238 slen = strlen((char*)gspec + spec_size) + 1; 5239 next = spec->next; 5240 spec->next = sizeof(*spec) + slen; 5241 strcpy((char*)&spec[1], gspec + spec_size); 5242 gspec += next; 5243 cur_data += spec->next; 5244 } 5245 break; 5246 } 5247 default: 5248 ret = -TARGET_EINVAL; 5249 unlock_user(argptr, guest_data, 0); 5250 goto out; 5251 } 5252 unlock_user(argptr, guest_data, 0); 5253 5254 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5255 if (!is_error(ret)) { 5256 guest_data = arg + host_dm->data_start; 5257 guest_data_size = host_dm->data_size - host_dm->data_start; 5258 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5259 switch (ie->host_cmd) { 5260 case DM_REMOVE_ALL: 5261 case DM_DEV_CREATE: 5262 case DM_DEV_REMOVE: 5263 case DM_DEV_RENAME: 5264 case DM_DEV_SUSPEND: 5265 case DM_DEV_STATUS: 5266 case DM_TABLE_LOAD: 5267 case DM_TABLE_CLEAR: 5268 case DM_TARGET_MSG: 5269 case DM_DEV_SET_GEOMETRY: 5270 /* no return data */ 5271 break; 5272 case 
DM_LIST_DEVICES: 5273 { 5274 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5275 uint32_t remaining_data = guest_data_size; 5276 void *cur_data = argptr; 5277 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5278 int nl_size = 12; /* can't use thunk_size due to alignment */ 5279 5280 while (1) { 5281 uint32_t next = nl->next; 5282 if (next) { 5283 nl->next = nl_size + (strlen(nl->name) + 1); 5284 } 5285 if (remaining_data < nl->next) { 5286 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5287 break; 5288 } 5289 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5290 strcpy(cur_data + nl_size, nl->name); 5291 cur_data += nl->next; 5292 remaining_data -= nl->next; 5293 if (!next) { 5294 break; 5295 } 5296 nl = (void*)nl + next; 5297 } 5298 break; 5299 } 5300 case DM_DEV_WAIT: 5301 case DM_TABLE_STATUS: 5302 { 5303 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5304 void *cur_data = argptr; 5305 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5306 int spec_size = thunk_type_size(arg_type, 0); 5307 int i; 5308 5309 for (i = 0; i < host_dm->target_count; i++) { 5310 uint32_t next = spec->next; 5311 int slen = strlen((char*)&spec[1]) + 1; 5312 spec->next = (cur_data - argptr) + spec_size + slen; 5313 if (guest_data_size < spec->next) { 5314 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5315 break; 5316 } 5317 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5318 strcpy(cur_data + spec_size, (char*)&spec[1]); 5319 cur_data = argptr + spec->next; 5320 spec = (void*)host_dm + host_dm->data_start + next; 5321 } 5322 break; 5323 } 5324 case DM_TABLE_DEPS: 5325 { 5326 void *hdata = (void*)host_dm + host_dm->data_start; 5327 int count = *(uint32_t*)hdata; 5328 uint64_t *hdev = hdata + 8; 5329 uint64_t *gdev = argptr + 8; 5330 int i; 5331 5332 *(uint32_t*)argptr = tswap32(count); 5333 for (i = 0; i < count; i++) { 5334 *gdev = tswap64(*hdev); 5335 gdev++; 5336 hdev++; 5337 } 5338 break; 5339 } 5340 case 
DM_LIST_VERSIONS: 5341 { 5342 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5343 uint32_t remaining_data = guest_data_size; 5344 void *cur_data = argptr; 5345 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5346 int vers_size = thunk_type_size(arg_type, 0); 5347 5348 while (1) { 5349 uint32_t next = vers->next; 5350 if (next) { 5351 vers->next = vers_size + (strlen(vers->name) + 1); 5352 } 5353 if (remaining_data < vers->next) { 5354 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5355 break; 5356 } 5357 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5358 strcpy(cur_data + vers_size, vers->name); 5359 cur_data += vers->next; 5360 remaining_data -= vers->next; 5361 if (!next) { 5362 break; 5363 } 5364 vers = (void*)vers + next; 5365 } 5366 break; 5367 } 5368 default: 5369 unlock_user(argptr, guest_data, 0); 5370 ret = -TARGET_EINVAL; 5371 goto out; 5372 } 5373 unlock_user(argptr, guest_data, guest_data_size); 5374 5375 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5376 if (!argptr) { 5377 ret = -TARGET_EFAULT; 5378 goto out; 5379 } 5380 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5381 unlock_user(argptr, arg, target_size); 5382 } 5383 out: 5384 g_free(big_buf); 5385 return ret; 5386 } 5387 5388 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5389 int cmd, abi_long arg) 5390 { 5391 void *argptr; 5392 int target_size; 5393 const argtype *arg_type = ie->arg_type; 5394 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5395 abi_long ret; 5396 5397 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5398 struct blkpg_partition host_part; 5399 5400 /* Read and convert blkpg */ 5401 arg_type++; 5402 target_size = thunk_type_size(arg_type, 0); 5403 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5404 if (!argptr) { 5405 ret = -TARGET_EFAULT; 5406 goto out; 5407 } 5408 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5409 unlock_user(argptr, 
arg, 0); 5410 5411 switch (host_blkpg->op) { 5412 case BLKPG_ADD_PARTITION: 5413 case BLKPG_DEL_PARTITION: 5414 /* payload is struct blkpg_partition */ 5415 break; 5416 default: 5417 /* Unknown opcode */ 5418 ret = -TARGET_EINVAL; 5419 goto out; 5420 } 5421 5422 /* Read and convert blkpg->data */ 5423 arg = (abi_long)(uintptr_t)host_blkpg->data; 5424 target_size = thunk_type_size(part_arg_type, 0); 5425 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5426 if (!argptr) { 5427 ret = -TARGET_EFAULT; 5428 goto out; 5429 } 5430 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5431 unlock_user(argptr, arg, 0); 5432 5433 /* Swizzle the data pointer to our local copy and call! */ 5434 host_blkpg->data = &host_part; 5435 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5436 5437 out: 5438 return ret; 5439 } 5440 5441 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5442 int fd, int cmd, abi_long arg) 5443 { 5444 const argtype *arg_type = ie->arg_type; 5445 const StructEntry *se; 5446 const argtype *field_types; 5447 const int *dst_offsets, *src_offsets; 5448 int target_size; 5449 void *argptr; 5450 abi_ulong *target_rt_dev_ptr = NULL; 5451 unsigned long *host_rt_dev_ptr = NULL; 5452 abi_long ret; 5453 int i; 5454 5455 assert(ie->access == IOC_W); 5456 assert(*arg_type == TYPE_PTR); 5457 arg_type++; 5458 assert(*arg_type == TYPE_STRUCT); 5459 target_size = thunk_type_size(arg_type, 0); 5460 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5461 if (!argptr) { 5462 return -TARGET_EFAULT; 5463 } 5464 arg_type++; 5465 assert(*arg_type == (int)STRUCT_rtentry); 5466 se = struct_entries + *arg_type++; 5467 assert(se->convert[0] == NULL); 5468 /* convert struct here to be able to catch rt_dev string */ 5469 field_types = se->field_types; 5470 dst_offsets = se->field_offsets[THUNK_HOST]; 5471 src_offsets = se->field_offsets[THUNK_TARGET]; 5472 for (i = 0; i < se->nb_fields; i++) { 5473 if (dst_offsets[i] == offsetof(struct 
rtentry, rt_dev)) { 5474 assert(*field_types == TYPE_PTRVOID); 5475 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5476 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5477 if (*target_rt_dev_ptr != 0) { 5478 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5479 tswapal(*target_rt_dev_ptr)); 5480 if (!*host_rt_dev_ptr) { 5481 unlock_user(argptr, arg, 0); 5482 return -TARGET_EFAULT; 5483 } 5484 } else { 5485 *host_rt_dev_ptr = 0; 5486 } 5487 field_types++; 5488 continue; 5489 } 5490 field_types = thunk_convert(buf_temp + dst_offsets[i], 5491 argptr + src_offsets[i], 5492 field_types, THUNK_HOST); 5493 } 5494 unlock_user(argptr, arg, 0); 5495 5496 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5497 5498 assert(host_rt_dev_ptr != NULL); 5499 assert(target_rt_dev_ptr != NULL); 5500 if (*host_rt_dev_ptr != 0) { 5501 unlock_user((void *)*host_rt_dev_ptr, 5502 *target_rt_dev_ptr, 0); 5503 } 5504 return ret; 5505 } 5506 5507 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5508 int fd, int cmd, abi_long arg) 5509 { 5510 int sig = target_to_host_signal(arg); 5511 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5512 } 5513 5514 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp, 5515 int fd, int cmd, abi_long arg) 5516 { 5517 struct timeval tv; 5518 abi_long ret; 5519 5520 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv)); 5521 if (is_error(ret)) { 5522 return ret; 5523 } 5524 5525 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) { 5526 if (copy_to_user_timeval(arg, &tv)) { 5527 return -TARGET_EFAULT; 5528 } 5529 } else { 5530 if (copy_to_user_timeval64(arg, &tv)) { 5531 return -TARGET_EFAULT; 5532 } 5533 } 5534 5535 return ret; 5536 } 5537 5538 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp, 5539 int fd, int cmd, abi_long arg) 5540 { 5541 struct timespec ts; 5542 abi_long ret; 5543 5544 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts)); 5545 if 
(is_error(ret)) { 5546 return ret; 5547 } 5548 5549 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) { 5550 if (host_to_target_timespec(arg, &ts)) { 5551 return -TARGET_EFAULT; 5552 } 5553 } else{ 5554 if (host_to_target_timespec64(arg, &ts)) { 5555 return -TARGET_EFAULT; 5556 } 5557 } 5558 5559 return ret; 5560 } 5561 5562 #ifdef TIOCGPTPEER 5563 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp, 5564 int fd, int cmd, abi_long arg) 5565 { 5566 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl); 5567 return get_errno(safe_ioctl(fd, ie->host_cmd, flags)); 5568 } 5569 #endif 5570 5571 #ifdef HAVE_DRM_H 5572 5573 static void unlock_drm_version(struct drm_version *host_ver, 5574 struct target_drm_version *target_ver, 5575 bool copy) 5576 { 5577 unlock_user(host_ver->name, target_ver->name, 5578 copy ? host_ver->name_len : 0); 5579 unlock_user(host_ver->date, target_ver->date, 5580 copy ? host_ver->date_len : 0); 5581 unlock_user(host_ver->desc, target_ver->desc, 5582 copy ? 
host_ver->desc_len : 0); 5583 } 5584 5585 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver, 5586 struct target_drm_version *target_ver) 5587 { 5588 memset(host_ver, 0, sizeof(*host_ver)); 5589 5590 __get_user(host_ver->name_len, &target_ver->name_len); 5591 if (host_ver->name_len) { 5592 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name, 5593 target_ver->name_len, 0); 5594 if (!host_ver->name) { 5595 return -EFAULT; 5596 } 5597 } 5598 5599 __get_user(host_ver->date_len, &target_ver->date_len); 5600 if (host_ver->date_len) { 5601 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date, 5602 target_ver->date_len, 0); 5603 if (!host_ver->date) { 5604 goto err; 5605 } 5606 } 5607 5608 __get_user(host_ver->desc_len, &target_ver->desc_len); 5609 if (host_ver->desc_len) { 5610 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc, 5611 target_ver->desc_len, 0); 5612 if (!host_ver->desc) { 5613 goto err; 5614 } 5615 } 5616 5617 return 0; 5618 err: 5619 unlock_drm_version(host_ver, target_ver, false); 5620 return -EFAULT; 5621 } 5622 5623 static inline void host_to_target_drmversion( 5624 struct target_drm_version *target_ver, 5625 struct drm_version *host_ver) 5626 { 5627 __put_user(host_ver->version_major, &target_ver->version_major); 5628 __put_user(host_ver->version_minor, &target_ver->version_minor); 5629 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel); 5630 __put_user(host_ver->name_len, &target_ver->name_len); 5631 __put_user(host_ver->date_len, &target_ver->date_len); 5632 __put_user(host_ver->desc_len, &target_ver->desc_len); 5633 unlock_drm_version(host_ver, target_ver, true); 5634 } 5635 5636 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp, 5637 int fd, int cmd, abi_long arg) 5638 { 5639 struct drm_version *ver; 5640 struct target_drm_version *target_ver; 5641 abi_long ret; 5642 5643 switch (ie->host_cmd) { 5644 case DRM_IOCTL_VERSION: 5645 if 
(!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) { 5646 return -TARGET_EFAULT; 5647 } 5648 ver = (struct drm_version *)buf_temp; 5649 ret = target_to_host_drmversion(ver, target_ver); 5650 if (!is_error(ret)) { 5651 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver)); 5652 if (is_error(ret)) { 5653 unlock_drm_version(ver, target_ver, false); 5654 } else { 5655 host_to_target_drmversion(target_ver, ver); 5656 } 5657 } 5658 unlock_user_struct(target_ver, arg, 0); 5659 return ret; 5660 } 5661 return -TARGET_ENOSYS; 5662 } 5663 5664 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie, 5665 struct drm_i915_getparam *gparam, 5666 int fd, abi_long arg) 5667 { 5668 abi_long ret; 5669 int value; 5670 struct target_drm_i915_getparam *target_gparam; 5671 5672 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) { 5673 return -TARGET_EFAULT; 5674 } 5675 5676 __get_user(gparam->param, &target_gparam->param); 5677 gparam->value = &value; 5678 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam)); 5679 put_user_s32(value, target_gparam->value); 5680 5681 unlock_user_struct(target_gparam, arg, 0); 5682 return ret; 5683 } 5684 5685 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp, 5686 int fd, int cmd, abi_long arg) 5687 { 5688 switch (ie->host_cmd) { 5689 case DRM_IOCTL_I915_GETPARAM: 5690 return do_ioctl_drm_i915_getparam(ie, 5691 (struct drm_i915_getparam *)buf_temp, 5692 fd, arg); 5693 default: 5694 return -TARGET_ENOSYS; 5695 } 5696 } 5697 5698 #endif 5699 5700 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp, 5701 int fd, int cmd, abi_long arg) 5702 { 5703 struct tun_filter *filter = (struct tun_filter *)buf_temp; 5704 struct tun_filter *target_filter; 5705 char *target_addr; 5706 5707 assert(ie->access == IOC_W); 5708 5709 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1); 5710 if (!target_filter) { 5711 return -TARGET_EFAULT; 5712 } 5713 filter->flags = 
tswap16(target_filter->flags); 5714 filter->count = tswap16(target_filter->count); 5715 unlock_user(target_filter, arg, 0); 5716 5717 if (filter->count) { 5718 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN > 5719 MAX_STRUCT_SIZE) { 5720 return -TARGET_EFAULT; 5721 } 5722 5723 target_addr = lock_user(VERIFY_READ, 5724 arg + offsetof(struct tun_filter, addr), 5725 filter->count * ETH_ALEN, 1); 5726 if (!target_addr) { 5727 return -TARGET_EFAULT; 5728 } 5729 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN); 5730 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0); 5731 } 5732 5733 return get_errno(safe_ioctl(fd, ie->host_cmd, filter)); 5734 } 5735 5736 IOCTLEntry ioctl_entries[] = { 5737 #define IOCTL(cmd, access, ...) \ 5738 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5739 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5740 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5741 #define IOCTL_IGNORE(cmd) \ 5742 { TARGET_ ## cmd, 0, #cmd }, 5743 #include "ioctls.h" 5744 { 0, 0, }, 5745 }; 5746 5747 /* ??? Implement proper locking for ioctls. */ 5748 /* do_ioctl() Must return target values and target errnos. */ 5749 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5750 { 5751 const IOCTLEntry *ie; 5752 const argtype *arg_type; 5753 abi_long ret; 5754 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5755 int target_size; 5756 void *argptr; 5757 5758 ie = ioctl_entries; 5759 for(;;) { 5760 if (ie->target_cmd == 0) { 5761 qemu_log_mask( 5762 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5763 return -TARGET_ENOSYS; 5764 } 5765 if (ie->target_cmd == cmd) 5766 break; 5767 ie++; 5768 } 5769 arg_type = ie->arg_type; 5770 if (ie->do_ioctl) { 5771 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5772 } else if (!ie->host_cmd) { 5773 /* Some architectures define BSD ioctls in their headers 5774 that are not implemented in Linux. 
*/ 5775 return -TARGET_ENOSYS; 5776 } 5777 5778 switch(arg_type[0]) { 5779 case TYPE_NULL: 5780 /* no argument */ 5781 ret = get_errno(safe_ioctl(fd, ie->host_cmd)); 5782 break; 5783 case TYPE_PTRVOID: 5784 case TYPE_INT: 5785 case TYPE_LONG: 5786 case TYPE_ULONG: 5787 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg)); 5788 break; 5789 case TYPE_PTR: 5790 arg_type++; 5791 target_size = thunk_type_size(arg_type, 0); 5792 switch(ie->access) { 5793 case IOC_R: 5794 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5795 if (!is_error(ret)) { 5796 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5797 if (!argptr) 5798 return -TARGET_EFAULT; 5799 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5800 unlock_user(argptr, arg, target_size); 5801 } 5802 break; 5803 case IOC_W: 5804 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5805 if (!argptr) 5806 return -TARGET_EFAULT; 5807 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5808 unlock_user(argptr, arg, 0); 5809 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5810 break; 5811 default: 5812 case IOC_RW: 5813 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5814 if (!argptr) 5815 return -TARGET_EFAULT; 5816 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5817 unlock_user(argptr, arg, 0); 5818 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5819 if (!is_error(ret)) { 5820 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5821 if (!argptr) 5822 return -TARGET_EFAULT; 5823 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5824 unlock_user(argptr, arg, target_size); 5825 } 5826 break; 5827 } 5828 break; 5829 default: 5830 qemu_log_mask(LOG_UNIMP, 5831 "Unsupported ioctl type: cmd=0x%04lx type=%d\n", 5832 (long)cmd, arg_type[0]); 5833 ret = -TARGET_ENOSYS; 5834 break; 5835 } 5836 return ret; 5837 } 5838 5839 static const bitmask_transtbl iflag_tbl[] = { 5840 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 5841 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, 
BRKINT }, 5842 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 5843 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 5844 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 5845 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 5846 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 5847 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 5848 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 5849 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 5850 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 5851 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 5852 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 5853 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 5854 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8}, 5855 { 0, 0, 0, 0 } 5856 }; 5857 5858 static const bitmask_transtbl oflag_tbl[] = { 5859 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 5860 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 5861 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 5862 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 5863 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 5864 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 5865 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 5866 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 5867 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 5868 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 5869 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 5870 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 5871 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 5872 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 5873 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 5874 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 5875 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 5876 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 5877 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 5878 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 5879 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 5880 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 5881 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 5882 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 5883 { 0, 0, 0, 0 } 5884 }; 5885 5886 static 
const bitmask_transtbl cflag_tbl[] = { 5887 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 5888 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 5889 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 5890 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 5891 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 5892 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 5893 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 5894 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 5895 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 5896 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 5897 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 5898 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 5899 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 5900 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 5901 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 5902 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 5903 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 5904 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 5905 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 5906 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 5907 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 5908 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 5909 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 5910 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 5911 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 5912 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 5913 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 5914 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 5915 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 5916 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 5917 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 5918 { 0, 0, 0, 0 } 5919 }; 5920 5921 static const bitmask_transtbl lflag_tbl[] = { 5922 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 5923 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 5924 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 5925 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 5926 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 5927 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, 
ECHOK }, 5928 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 5929 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 5930 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 5931 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 5932 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 5933 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 5934 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 5935 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 5936 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 5937 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC}, 5938 { 0, 0, 0, 0 } 5939 }; 5940 5941 static void target_to_host_termios (void *dst, const void *src) 5942 { 5943 struct host_termios *host = dst; 5944 const struct target_termios *target = src; 5945 5946 host->c_iflag = 5947 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 5948 host->c_oflag = 5949 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 5950 host->c_cflag = 5951 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 5952 host->c_lflag = 5953 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 5954 host->c_line = target->c_line; 5955 5956 memset(host->c_cc, 0, sizeof(host->c_cc)); 5957 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 5958 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 5959 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 5960 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 5961 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 5962 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 5963 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 5964 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 5965 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 5966 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 5967 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 5968 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 5969 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 5970 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 5971 host->c_cc[VWERASE] = 
target->c_cc[TARGET_VWERASE]; 5972 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 5973 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 5974 } 5975 5976 static void host_to_target_termios (void *dst, const void *src) 5977 { 5978 struct target_termios *target = dst; 5979 const struct host_termios *host = src; 5980 5981 target->c_iflag = 5982 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 5983 target->c_oflag = 5984 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 5985 target->c_cflag = 5986 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 5987 target->c_lflag = 5988 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 5989 target->c_line = host->c_line; 5990 5991 memset(target->c_cc, 0, sizeof(target->c_cc)); 5992 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 5993 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 5994 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 5995 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 5996 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 5997 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 5998 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 5999 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 6000 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 6001 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 6002 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 6003 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 6004 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 6005 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 6006 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 6007 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 6008 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 6009 } 6010 6011 static const StructEntry struct_termios_def = { 6012 .convert = { host_to_target_termios, target_to_host_termios }, 6013 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 6014 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 6015 .print = 
print_termios, 6016 }; 6017 6018 static const bitmask_transtbl mmap_flags_tbl[] = { 6019 #ifdef TARGET_MAP_32BIT 6020 { TARGET_MAP_32BIT, TARGET_MAP_32BIT, MAP_32BIT, MAP_32BIT }, 6021 #endif 6022 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 6023 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 6024 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 6025 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, 6026 MAP_ANONYMOUS, MAP_ANONYMOUS }, 6027 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, 6028 MAP_GROWSDOWN, MAP_GROWSDOWN }, 6029 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, 6030 MAP_DENYWRITE, MAP_DENYWRITE }, 6031 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, 6032 MAP_EXECUTABLE, MAP_EXECUTABLE }, 6033 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 6034 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, 6035 MAP_NORESERVE, MAP_NORESERVE }, 6036 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB }, 6037 /* MAP_STACK had been ignored by the kernel for quite some time. 6038 Recognize it for the target insofar as we do not want to pass 6039 it through to the host. */ 6040 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 }, 6041 { 0, 0, 0, 0 } 6042 }; 6043 6044 /* 6045 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64) 6046 * TARGET_I386 is defined if TARGET_X86_64 is defined 6047 */ 6048 #if defined(TARGET_I386) 6049 6050 /* NOTE: there is really one LDT for all the threads */ 6051 static uint8_t *ldt_table; 6052 6053 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 6054 { 6055 int size; 6056 void *p; 6057 6058 if (!ldt_table) 6059 return 0; 6060 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 6061 if (size > bytecount) 6062 size = bytecount; 6063 p = lock_user(VERIFY_WRITE, ptr, size, 0); 6064 if (!p) 6065 return -TARGET_EFAULT; 6066 /* ??? Should this by byteswapped? 
 */
    /* Tail of read_ldt(): copy the (host-resident) LDT image to the guest. */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/*
 * Install or clear one LDT descriptor on behalf of the guest's
 * modify_ldt(2).  The flag-unpacking and descriptor bit-packing below
 * intentionally mirror the Linux kernel's write_ldt() so guest-visible
 * semantics match native execution.
 *
 * @ptr:       guest address of a struct target_modify_ldt_ldt_s
 * @bytecount: must be exactly sizeof that struct
 * @oldmode:   1 for the legacy func==1 interface (no 'useable' bit,
 *             clears on base==limit==0 unconditionally)
 * Returns 0 or a -TARGET_* errno.
 */
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest's descriptor request into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (same layout as the kernel ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents==3 (conforming code) is rejected in oldmode and must be
       marked not-present, as in the kernel. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT lazily on first use */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): func 0 reads the LDT, 1 is the legacy write
   interface, 0x11 the modern one. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor in the GDT.  If the guest
 * passes entry_number == -1 we pick the first free TLS slot and write the
 * chosen index back into the guest struct, as the kernel does.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/*
 * get_thread_area(2): read a TLS descriptor back out of the GDT and
 * unpack it into the guest's struct user_desc layout (inverse of the
 * packing in do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits back into user_desc flag fields. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}

/* arch_prctl is a 64-bit-only interface; reject it on 32-bit ABIs. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
/* arch_prctl(2): get/set the FS and GS segment bases for x86-64 guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector, then set the hidden base directly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */
#endif /* defined(TARGET_I386) */

/*
 * These constants are generic. Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME 15
# define PR_GET_NAME 16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR (1 << 0)
# define PR_FP_MODE_FRE (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL 50
# define PR_SVE_GET_VL 51
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS 54
# define PR_PAC_APIAKEY (1 << 0)
# define PR_PAC_APIBKEY (1 << 1)
# define PR_PAC_APDAKEY (1 << 2)
# define PR_PAC_APDBKEY (1 << 3)
# define PR_PAC_APGAKEY (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT 1
# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT 3
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL 63
# define PR_SME_GET_VL 64
# define PR_SME_VL_LEN_MASK 0xffff
# define PR_SME_VL_INHERIT (1 << 17)
#endif

#include "target_prctl.h"

/* Default handlers for per-target prctl hooks that take no argument. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}

/* Default handler for per-target prctl hooks that take one argument. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}

/* Any hook not provided by target_prctl.h falls back to EINVAL. */
#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif

/*
 * Emulate prctl(2).  Options with pointer arguments are translated
 * explicitly; per-target options go through the do_prctl_* hooks;
 * pointer-free options are passed straight to the host; everything
 * else is rejected so the guest cannot change state QEMU relies on.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The kernel's comm name is at most 16 bytes including NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}

#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation and holds new threads until the parent has
   finished setting up their TLS state. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Hand-off area between do_fork() and the child in clone_func(). */
typedef struct {
    CPUArchState *env;          /* the child's (already copied) CPU state */
    pthread_mutex_t mutex;      /* protects the ready handshake below */
    pthread_cond_t cond;        /* child signals this when initialized */
    pthread_t thread;
    uint32_t tid;               /* filled in by the child via sys_gettid() */
    abi_ulong child_tidptr;     /* guest addrs for CLONE_*_SETTID, or 0 */
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;

/*
 * Entry point of a new guest thread created with CLONE_VM.  Registers
 * the thread with RCU/TCG, publishes its TID, performs the ready
 * handshake with do_fork(), then enters the CPU loop (never returns).
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   CLONE_VM requests become a host pthread sharing this process's
   emulated address space; anything else is emulated with fork()
   (including vfork, whose CLONE_VM is stripped above). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed past its TLS wait. */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
*/ 6788 static int target_to_host_fcntl_cmd(int cmd) 6789 { 6790 int ret; 6791 6792 switch(cmd) { 6793 case TARGET_F_DUPFD: 6794 case TARGET_F_GETFD: 6795 case TARGET_F_SETFD: 6796 case TARGET_F_GETFL: 6797 case TARGET_F_SETFL: 6798 case TARGET_F_OFD_GETLK: 6799 case TARGET_F_OFD_SETLK: 6800 case TARGET_F_OFD_SETLKW: 6801 ret = cmd; 6802 break; 6803 case TARGET_F_GETLK: 6804 ret = F_GETLK64; 6805 break; 6806 case TARGET_F_SETLK: 6807 ret = F_SETLK64; 6808 break; 6809 case TARGET_F_SETLKW: 6810 ret = F_SETLKW64; 6811 break; 6812 case TARGET_F_GETOWN: 6813 ret = F_GETOWN; 6814 break; 6815 case TARGET_F_SETOWN: 6816 ret = F_SETOWN; 6817 break; 6818 case TARGET_F_GETSIG: 6819 ret = F_GETSIG; 6820 break; 6821 case TARGET_F_SETSIG: 6822 ret = F_SETSIG; 6823 break; 6824 #if TARGET_ABI_BITS == 32 6825 case TARGET_F_GETLK64: 6826 ret = F_GETLK64; 6827 break; 6828 case TARGET_F_SETLK64: 6829 ret = F_SETLK64; 6830 break; 6831 case TARGET_F_SETLKW64: 6832 ret = F_SETLKW64; 6833 break; 6834 #endif 6835 case TARGET_F_SETLEASE: 6836 ret = F_SETLEASE; 6837 break; 6838 case TARGET_F_GETLEASE: 6839 ret = F_GETLEASE; 6840 break; 6841 #ifdef F_DUPFD_CLOEXEC 6842 case TARGET_F_DUPFD_CLOEXEC: 6843 ret = F_DUPFD_CLOEXEC; 6844 break; 6845 #endif 6846 case TARGET_F_NOTIFY: 6847 ret = F_NOTIFY; 6848 break; 6849 #ifdef F_GETOWN_EX 6850 case TARGET_F_GETOWN_EX: 6851 ret = F_GETOWN_EX; 6852 break; 6853 #endif 6854 #ifdef F_SETOWN_EX 6855 case TARGET_F_SETOWN_EX: 6856 ret = F_SETOWN_EX; 6857 break; 6858 #endif 6859 #ifdef F_SETPIPE_SZ 6860 case TARGET_F_SETPIPE_SZ: 6861 ret = F_SETPIPE_SZ; 6862 break; 6863 case TARGET_F_GETPIPE_SZ: 6864 ret = F_GETPIPE_SZ; 6865 break; 6866 #endif 6867 #ifdef F_ADD_SEALS 6868 case TARGET_F_ADD_SEALS: 6869 ret = F_ADD_SEALS; 6870 break; 6871 case TARGET_F_GET_SEALS: 6872 ret = F_GET_SEALS; 6873 break; 6874 #endif 6875 default: 6876 ret = -TARGET_EINVAL; 6877 break; 6878 } 6879 6880 #if defined(__powerpc64__) 6881 /* On PPC64, glibc headers has the F_*LK* defined 
to 12, 13 and 14 and 6882 * is not supported by kernel. The glibc fcntl call actually adjusts 6883 * them to 5, 6 and 7 before making the syscall(). Since we make the 6884 * syscall directly, adjust to what is supported by the kernel. 6885 */ 6886 if (ret >= F_GETLK64 && ret <= F_SETLKW64) { 6887 ret -= F_GETLK64 - 5; 6888 } 6889 #endif 6890 6891 return ret; 6892 } 6893 6894 #define FLOCK_TRANSTBL \ 6895 switch (type) { \ 6896 TRANSTBL_CONVERT(F_RDLCK); \ 6897 TRANSTBL_CONVERT(F_WRLCK); \ 6898 TRANSTBL_CONVERT(F_UNLCK); \ 6899 } 6900 6901 static int target_to_host_flock(int type) 6902 { 6903 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a 6904 FLOCK_TRANSTBL 6905 #undef TRANSTBL_CONVERT 6906 return -TARGET_EINVAL; 6907 } 6908 6909 static int host_to_target_flock(int type) 6910 { 6911 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a 6912 FLOCK_TRANSTBL 6913 #undef TRANSTBL_CONVERT 6914 /* if we don't know how to convert the value coming 6915 * from the host we copy to the target field as-is 6916 */ 6917 return type; 6918 } 6919 6920 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6921 abi_ulong target_flock_addr) 6922 { 6923 struct target_flock *target_fl; 6924 int l_type; 6925 6926 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6927 return -TARGET_EFAULT; 6928 } 6929 6930 __get_user(l_type, &target_fl->l_type); 6931 l_type = target_to_host_flock(l_type); 6932 if (l_type < 0) { 6933 return l_type; 6934 } 6935 fl->l_type = l_type; 6936 __get_user(fl->l_whence, &target_fl->l_whence); 6937 __get_user(fl->l_start, &target_fl->l_start); 6938 __get_user(fl->l_len, &target_fl->l_len); 6939 __get_user(fl->l_pid, &target_fl->l_pid); 6940 unlock_user_struct(target_fl, target_flock_addr, 0); 6941 return 0; 6942 } 6943 6944 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6945 const struct flock64 *fl) 6946 { 6947 struct target_flock *target_fl; 6948 short l_type; 6949 6950 if 
(!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6951 return -TARGET_EFAULT; 6952 } 6953 6954 l_type = host_to_target_flock(fl->l_type); 6955 __put_user(l_type, &target_fl->l_type); 6956 __put_user(fl->l_whence, &target_fl->l_whence); 6957 __put_user(fl->l_start, &target_fl->l_start); 6958 __put_user(fl->l_len, &target_fl->l_len); 6959 __put_user(fl->l_pid, &target_fl->l_pid); 6960 unlock_user_struct(target_fl, target_flock_addr, 1); 6961 return 0; 6962 } 6963 6964 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6965 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6966 6967 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6968 struct target_oabi_flock64 { 6969 abi_short l_type; 6970 abi_short l_whence; 6971 abi_llong l_start; 6972 abi_llong l_len; 6973 abi_int l_pid; 6974 } QEMU_PACKED; 6975 6976 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6977 abi_ulong target_flock_addr) 6978 { 6979 struct target_oabi_flock64 *target_fl; 6980 int l_type; 6981 6982 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6983 return -TARGET_EFAULT; 6984 } 6985 6986 __get_user(l_type, &target_fl->l_type); 6987 l_type = target_to_host_flock(l_type); 6988 if (l_type < 0) { 6989 return l_type; 6990 } 6991 fl->l_type = l_type; 6992 __get_user(fl->l_whence, &target_fl->l_whence); 6993 __get_user(fl->l_start, &target_fl->l_start); 6994 __get_user(fl->l_len, &target_fl->l_len); 6995 __get_user(fl->l_pid, &target_fl->l_pid); 6996 unlock_user_struct(target_fl, target_flock_addr, 0); 6997 return 0; 6998 } 6999 7000 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 7001 const struct flock64 *fl) 7002 { 7003 struct target_oabi_flock64 *target_fl; 7004 short l_type; 7005 7006 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 7007 return -TARGET_EFAULT; 7008 } 7009 7010 l_type = host_to_target_flock(fl->l_type); 7011 
__put_user(l_type, &target_fl->l_type); 7012 __put_user(fl->l_whence, &target_fl->l_whence); 7013 __put_user(fl->l_start, &target_fl->l_start); 7014 __put_user(fl->l_len, &target_fl->l_len); 7015 __put_user(fl->l_pid, &target_fl->l_pid); 7016 unlock_user_struct(target_fl, target_flock_addr, 1); 7017 return 0; 7018 } 7019 #endif 7020 7021 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 7022 abi_ulong target_flock_addr) 7023 { 7024 struct target_flock64 *target_fl; 7025 int l_type; 7026 7027 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 7028 return -TARGET_EFAULT; 7029 } 7030 7031 __get_user(l_type, &target_fl->l_type); 7032 l_type = target_to_host_flock(l_type); 7033 if (l_type < 0) { 7034 return l_type; 7035 } 7036 fl->l_type = l_type; 7037 __get_user(fl->l_whence, &target_fl->l_whence); 7038 __get_user(fl->l_start, &target_fl->l_start); 7039 __get_user(fl->l_len, &target_fl->l_len); 7040 __get_user(fl->l_pid, &target_fl->l_pid); 7041 unlock_user_struct(target_fl, target_flock_addr, 0); 7042 return 0; 7043 } 7044 7045 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 7046 const struct flock64 *fl) 7047 { 7048 struct target_flock64 *target_fl; 7049 short l_type; 7050 7051 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 7052 return -TARGET_EFAULT; 7053 } 7054 7055 l_type = host_to_target_flock(fl->l_type); 7056 __put_user(l_type, &target_fl->l_type); 7057 __put_user(fl->l_whence, &target_fl->l_whence); 7058 __put_user(fl->l_start, &target_fl->l_start); 7059 __put_user(fl->l_len, &target_fl->l_len); 7060 __put_user(fl->l_pid, &target_fl->l_pid); 7061 unlock_user_struct(target_fl, target_flock_addr, 1); 7062 return 0; 7063 } 7064 7065 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 7066 { 7067 struct flock64 fl64; 7068 #ifdef F_GETOWN_EX 7069 struct f_owner_ex fox; 7070 struct target_f_owner_ex *target_fox; 7071 #endif 7072 abi_long ret; 7073 int host_cmd = 
target_to_host_fcntl_cmd(cmd); 7074 7075 if (host_cmd == -TARGET_EINVAL) 7076 return host_cmd; 7077 7078 switch(cmd) { 7079 case TARGET_F_GETLK: 7080 ret = copy_from_user_flock(&fl64, arg); 7081 if (ret) { 7082 return ret; 7083 } 7084 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7085 if (ret == 0) { 7086 ret = copy_to_user_flock(arg, &fl64); 7087 } 7088 break; 7089 7090 case TARGET_F_SETLK: 7091 case TARGET_F_SETLKW: 7092 ret = copy_from_user_flock(&fl64, arg); 7093 if (ret) { 7094 return ret; 7095 } 7096 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7097 break; 7098 7099 case TARGET_F_GETLK64: 7100 case TARGET_F_OFD_GETLK: 7101 ret = copy_from_user_flock64(&fl64, arg); 7102 if (ret) { 7103 return ret; 7104 } 7105 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7106 if (ret == 0) { 7107 ret = copy_to_user_flock64(arg, &fl64); 7108 } 7109 break; 7110 case TARGET_F_SETLK64: 7111 case TARGET_F_SETLKW64: 7112 case TARGET_F_OFD_SETLK: 7113 case TARGET_F_OFD_SETLKW: 7114 ret = copy_from_user_flock64(&fl64, arg); 7115 if (ret) { 7116 return ret; 7117 } 7118 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7119 break; 7120 7121 case TARGET_F_GETFL: 7122 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7123 if (ret >= 0) { 7124 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 7125 } 7126 break; 7127 7128 case TARGET_F_SETFL: 7129 ret = get_errno(safe_fcntl(fd, host_cmd, 7130 target_to_host_bitmask(arg, 7131 fcntl_flags_tbl))); 7132 break; 7133 7134 #ifdef F_GETOWN_EX 7135 case TARGET_F_GETOWN_EX: 7136 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7137 if (ret >= 0) { 7138 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 7139 return -TARGET_EFAULT; 7140 target_fox->type = tswap32(fox.type); 7141 target_fox->pid = tswap32(fox.pid); 7142 unlock_user_struct(target_fox, arg, 1); 7143 } 7144 break; 7145 #endif 7146 7147 #ifdef F_SETOWN_EX 7148 case TARGET_F_SETOWN_EX: 7149 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 7150 return -TARGET_EFAULT; 
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* Signal numbers differ between target and host ABIs. */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument: pass straight through. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid into the 16-bit range, as the kernel does for the
   legacy 16-bit uid syscalls (overflow becomes 65534 / "nobody"). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid targets: all of these are identity transforms. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

/* One-time setup: register thunk struct layouts and patch ioctl entries
   whose size field must be computed from the (target-dependent) argument
   type rather than hard-coded. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary.  We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#ifdef TARGET_NR_truncate64
/* truncate64: on some ABIs the 64-bit offset pair must be register-aligned,
   shifting the argument positions by one. */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Convert a guest target_itimerspec (32-bit time_t layout) to host. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Convert a guest __kernel_itimerspec (64-bit time_t layout) to host. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Convert a host itimerspec to the guest's 32-bit time_t layout. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* Convert a host itimerspec to the guest's 64-bit time_t layout. */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif

#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/* Field-by-field copy of a guest struct timex into the host struct
   (layouts differ; each scalar is byte-swapped individually). */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
7469 struct timex *host_tx) 7470 { 7471 struct target_timex *target_tx; 7472 7473 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7474 return -TARGET_EFAULT; 7475 } 7476 7477 __put_user(host_tx->modes, &target_tx->modes); 7478 __put_user(host_tx->offset, &target_tx->offset); 7479 __put_user(host_tx->freq, &target_tx->freq); 7480 __put_user(host_tx->maxerror, &target_tx->maxerror); 7481 __put_user(host_tx->esterror, &target_tx->esterror); 7482 __put_user(host_tx->status, &target_tx->status); 7483 __put_user(host_tx->constant, &target_tx->constant); 7484 __put_user(host_tx->precision, &target_tx->precision); 7485 __put_user(host_tx->tolerance, &target_tx->tolerance); 7486 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7487 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7488 __put_user(host_tx->tick, &target_tx->tick); 7489 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7490 __put_user(host_tx->jitter, &target_tx->jitter); 7491 __put_user(host_tx->shift, &target_tx->shift); 7492 __put_user(host_tx->stabil, &target_tx->stabil); 7493 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7494 __put_user(host_tx->calcnt, &target_tx->calcnt); 7495 __put_user(host_tx->errcnt, &target_tx->errcnt); 7496 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7497 __put_user(host_tx->tai, &target_tx->tai); 7498 7499 unlock_user_struct(target_tx, target_addr, 1); 7500 return 0; 7501 } 7502 #endif 7503 7504 7505 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 7506 static inline abi_long target_to_host_timex64(struct timex *host_tx, 7507 abi_long target_addr) 7508 { 7509 struct target__kernel_timex *target_tx; 7510 7511 if (copy_from_user_timeval64(&host_tx->time, target_addr + 7512 offsetof(struct target__kernel_timex, 7513 time))) { 7514 return -TARGET_EFAULT; 7515 } 7516 7517 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7518 return -TARGET_EFAULT; 7519 } 7520 7521 __get_user(host_tx->modes, 
&target_tx->modes); 7522 __get_user(host_tx->offset, &target_tx->offset); 7523 __get_user(host_tx->freq, &target_tx->freq); 7524 __get_user(host_tx->maxerror, &target_tx->maxerror); 7525 __get_user(host_tx->esterror, &target_tx->esterror); 7526 __get_user(host_tx->status, &target_tx->status); 7527 __get_user(host_tx->constant, &target_tx->constant); 7528 __get_user(host_tx->precision, &target_tx->precision); 7529 __get_user(host_tx->tolerance, &target_tx->tolerance); 7530 __get_user(host_tx->tick, &target_tx->tick); 7531 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7532 __get_user(host_tx->jitter, &target_tx->jitter); 7533 __get_user(host_tx->shift, &target_tx->shift); 7534 __get_user(host_tx->stabil, &target_tx->stabil); 7535 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7536 __get_user(host_tx->calcnt, &target_tx->calcnt); 7537 __get_user(host_tx->errcnt, &target_tx->errcnt); 7538 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7539 __get_user(host_tx->tai, &target_tx->tai); 7540 7541 unlock_user_struct(target_tx, target_addr, 0); 7542 return 0; 7543 } 7544 7545 static inline abi_long host_to_target_timex64(abi_long target_addr, 7546 struct timex *host_tx) 7547 { 7548 struct target__kernel_timex *target_tx; 7549 7550 if (copy_to_user_timeval64(target_addr + 7551 offsetof(struct target__kernel_timex, time), 7552 &host_tx->time)) { 7553 return -TARGET_EFAULT; 7554 } 7555 7556 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7557 return -TARGET_EFAULT; 7558 } 7559 7560 __put_user(host_tx->modes, &target_tx->modes); 7561 __put_user(host_tx->offset, &target_tx->offset); 7562 __put_user(host_tx->freq, &target_tx->freq); 7563 __put_user(host_tx->maxerror, &target_tx->maxerror); 7564 __put_user(host_tx->esterror, &target_tx->esterror); 7565 __put_user(host_tx->status, &target_tx->status); 7566 __put_user(host_tx->constant, &target_tx->constant); 7567 __put_user(host_tx->precision, &target_tx->precision); 7568 __put_user(host_tx->tolerance, 
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

/*
 * Convert a guest struct sigevent at target_addr into *host_sevp,
 * mapping the guest signal number to the host one.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): copy-back flag is 1 despite VERIFY_READ — confirm
     * this is intentional (it matches the historical code). */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* flag bits for mlockall into host bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat out to the guest's stat64 layout at
 * target_addr.  On 32-bit ARM the EABI layout differs from the OABI
 * one, hence the cpu_env->eabi special case.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a (host-filled) struct target_statx out to guest memory,
 * byteswapping every field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif

/*
 * Issue a futex syscall directly, picking __NR_futex_time64 on 32-bit
 * hosts with 64-bit struct timespec, else the plain __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

/*
 * Same syscall selection as do_sys_futex, but through the safe_syscall
 * wrappers (restartable on guest signals) and with errno conversion.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex/futex_time64 syscall: per-operation argument
 * fix-ups (byteswap of compared values, signal translation for FUTEX_FD,
 * VAL2-vs-timeout disambiguation) before calling do_safe_futex.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's handle_bytes, call the
 * host syscall with a host-side file_handle, then copy the opaque handle
 * and the mount id back to the guest.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled; sizeof + size could wrap on
     * a hostile value — confirm an upper bound is enforced elsewhere. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle to the host
 * (byteswapping the header fields) and open it, translating open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif

#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Emulate signalfd4: convert the guest sigset and flags, create the host
 * signalfd, and register an fd translator so reads are byteswapped.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}

/* Emit the NUL-separated argv of the emulated process (/proc/self/cmdline). */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}

/*
 * Emit a guest's-eye view of /proc/self/maps: walk the host mappings,
 * keep only those visible in the guest address space, and print them
 * with guest addresses and guest page protections.
 */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address the guest can see. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}

/*
 * Emit a minimal /proc/self/stat: real values for the fields userspace
 * commonly parses (pid, comm, ppid, starttime, stack start), zeros for
 * the other 44-field slots.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}

/* Copy the saved auxiliary vector out of the guest stack (/proc/self/auxv). */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

/*
 * Return 1 if filename is "/proc/<entry>" for this process, via either
 * "/proc/self/" or "/proc/<our pid>/"; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

/*
 * Dump a fatal-exception report (message, executable path, CPU state,
 * memory map) to logfile; no-op when logfile is NULL.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                           const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}

/* Dump an exception report to stderr and, if separate, to the QEMU log. */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match predicate used by the fake /proc open table below. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Re-emit the host's /proc/net/route with the address columns byteswapped
 * so a cross-endian guest sees them in its own byte order.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

#if defined(TARGET_SPARC)
/* Fixed /proc/cpuinfo contents for the emulated SPARC machine. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
/* Fixed /proc/cpuinfo contents for the emulated HPPA machine. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Fixed /proc/hardware contents for the emulated m68k machine. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif

/*
 * openat(2) emulation.  /proc files that must reflect the *guest* rather
 * than the QEMU host process (maps, stat, auxv, cmdline, and some
 * per-target files) are faked: their contents are generated into a memfd
 * (or a temp file as fallback) and that fd is returned.  Everything else
 * goes to the host via safe_openat().
 */
static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                        /* name or full path */
        int (*fill)(CPUArchState *cpu_env, int fd);  /* content generator */
        int (*cmp)(const char *s1, const char *s2);  /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* /proc/self/exe must name the guest binary, not QEMU itself. */
        return safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}

#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}

/* qemu_execve() Must return target values and target errnos.
 *
 * Although execve() is not an interruptible syscall it is
 * a special case where we must use the safe_syscall wrapper:
 * if we allow a signal to happen before we make the host
 * syscall then we will 'lose' it, because at the point of
 * execve the process leaves QEMU's control. So we use the
 * safe syscall wrapper to ensure that we either take the
 * signal as a guest signal, or else it does not happen
 * before the execve completes and makes it the other
 * program's problem.
8396 */ 8397 static abi_long qemu_execve(char *filename, char *argv[], 8398 char *envp[]) 8399 { 8400 char *i_arg = NULL, *i_name = NULL; 8401 char **new_argp; 8402 int argc, fd, ret, i, offset = 5; 8403 char *cp; 8404 char buf[BINPRM_BUF_SIZE]; 8405 8406 /* normal execve case */ 8407 if (qemu_execve_path == NULL || *qemu_execve_path == 0) { 8408 return get_errno(safe_execve(filename, argv, envp)); 8409 } 8410 8411 for (argc = 0; argv[argc] != NULL; argc++) { 8412 /* nothing */ ; 8413 } 8414 8415 fd = open(filename, O_RDONLY); 8416 if (fd == -1) { 8417 return get_errno(fd); 8418 } 8419 8420 ret = read(fd, buf, BINPRM_BUF_SIZE); 8421 if (ret == -1) { 8422 close(fd); 8423 return get_errno(ret); 8424 } 8425 8426 /* if we have less than 2 bytes, we can guess it is not executable */ 8427 if (ret < 2) { 8428 close(fd); 8429 return -host_to_target_errno(ENOEXEC); 8430 } 8431 8432 close(fd); 8433 8434 /* adapted from the kernel 8435 * https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/fs/binfmt_script.c 8436 */ 8437 if ((buf[0] == '#') && (buf[1] == '!')) { 8438 /* 8439 * This section does the #! interpretation. 8440 * Sorta complicated, but hopefully it will work. 
-TYT 8441 */ 8442 8443 buf[BINPRM_BUF_SIZE - 1] = '\0'; 8444 cp = strchr(buf, '\n'); 8445 if (cp == NULL) { 8446 cp = buf + BINPRM_BUF_SIZE - 1; 8447 } 8448 *cp = '\0'; 8449 while (cp > buf) { 8450 cp--; 8451 if ((*cp == ' ') || (*cp == '\t')) { 8452 *cp = '\0'; 8453 } else { 8454 break; 8455 } 8456 } 8457 for (cp = buf + 2; (*cp == ' ') || (*cp == '\t'); cp++) { 8458 /* nothing */ ; 8459 } 8460 if (*cp == '\0') { 8461 return -ENOEXEC; /* No interpreter name found */ 8462 } 8463 i_name = cp; 8464 i_arg = NULL; 8465 for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++) { 8466 /* nothing */ ; 8467 } 8468 while ((*cp == ' ') || (*cp == '\t')) { 8469 *cp++ = '\0'; 8470 } 8471 if (*cp) { 8472 i_arg = cp; 8473 } 8474 8475 if (i_arg) { 8476 offset += 2; 8477 } else { 8478 offset += 1; 8479 } 8480 } 8481 8482 new_argp = alloca((argc + offset + 1) * sizeof(void *)); 8483 8484 /* Copy the original arguments with offset */ 8485 for (i = 0; i < argc; i++) { 8486 new_argp[i + offset] = argv[i]; 8487 } 8488 8489 new_argp[0] = strdup(qemu_execve_path); 8490 new_argp[1] = strdup("--execve"); 8491 new_argp[2] = strdup(qemu_execve_path); 8492 new_argp[3] = strdup("-0"); 8493 new_argp[offset] = filename; 8494 new_argp[argc + offset] = NULL; 8495 8496 if (i_name) { 8497 new_argp[4] = i_name; 8498 new_argp[5] = i_name; 8499 8500 if (i_arg) { 8501 new_argp[6] = i_arg; 8502 } 8503 } else { 8504 new_argp[4] = argv[0]; 8505 } 8506 8507 return get_errno(safe_execve(qemu_execve_path, new_argp, envp)); 8508 } 8509 8510 static int target_to_host_cpu_mask(unsigned long *host_mask, 8511 size_t host_size, 8512 abi_ulong target_addr, 8513 size_t target_size) 8514 { 8515 unsigned target_bits = sizeof(abi_ulong) * 8; 8516 unsigned host_bits = sizeof(*host_mask) * 8; 8517 abi_ulong *target_mask; 8518 unsigned i, j; 8519 8520 assert(host_size >= target_size); 8521 8522 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1); 8523 if (!target_mask) { 8524 return -TARGET_EFAULT; 8525 } 8526 
    memset(host_mask, 0, host_size);

    /* Expand each guest abi_ulong word bit-by-bit into the host mask. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

/*
 * Copy a host cpu mask back out as a guest CPU-affinity bitmask.
 * Inverse of target_to_host_cpu_mask(); returns 0 or -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    /* Gather host bits into guest abi_ulong words, one word at a time. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}

#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): read host directory entries into a scratch
 * buffer and repack them as target struct linux_dirent records at
 * arg2.  Returns the number of target bytes produced, or a negative
 * target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;       /* host length; host/target record offsets */
    int hreclen, treclen;       /* per-record host/target sizes */
    off64_t prev_diroff = 0;    /* d_off of the last record emitted */

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: NUL terminator plus the trailing d_type byte (see below) */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */

#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): same repacking scheme as do_getdents(), but
 * with the fixed linux_dirent64 layout where d_type is a real field.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen here includes the NUL terminator */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
8712 */ 8713 lseek64(dirfd, prev_diroff, SEEK_SET); 8714 break; 8715 } 8716 8717 prev_diroff = hde->d_off; 8718 tde->d_ino = tswap64(hde->d_ino); 8719 tde->d_off = tswap64(hde->d_off); 8720 tde->d_reclen = tswap16(treclen); 8721 tde->d_type = hde->d_type; 8722 memcpy(tde->d_name, hde->d_name, namelen); 8723 } 8724 8725 unlock_user(tdirp, arg2, toff); 8726 return toff; 8727 } 8728 #endif /* TARGET_NR_getdents64 */ 8729 8730 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root) 8731 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old) 8732 #endif 8733 8734 /* This is an internal helper for do_syscall so that it is easier 8735 * to have a single return point, so that actions, such as logging 8736 * of syscall results, can be performed. 8737 * All errnos that do_syscall() returns must be -TARGET_<errcode>. 8738 */ 8739 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, 8740 abi_long arg2, abi_long arg3, abi_long arg4, 8741 abi_long arg5, abi_long arg6, abi_long arg7, 8742 abi_long arg8) 8743 { 8744 CPUState *cpu = env_cpu(cpu_env); 8745 abi_long ret; 8746 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \ 8747 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \ 8748 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \ 8749 || defined(TARGET_NR_statx) 8750 struct stat st; 8751 #endif 8752 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \ 8753 || defined(TARGET_NR_fstatfs) 8754 struct statfs stfs; 8755 #endif 8756 void *p; 8757 8758 switch(num) { 8759 case TARGET_NR_exit: 8760 /* In old applications this may be used to implement _exit(2). 8761 However in threaded applications it is used for thread termination, 8762 and _exit_group is used for application termination. 8763 Do thread termination if we have more then one thread. 
*/ 8764 8765 if (block_signals()) { 8766 return -QEMU_ERESTARTSYS; 8767 } 8768 8769 pthread_mutex_lock(&clone_lock); 8770 8771 if (CPU_NEXT(first_cpu)) { 8772 TaskState *ts = cpu->opaque; 8773 8774 object_property_set_bool(OBJECT(cpu), "realized", false, NULL); 8775 object_unref(OBJECT(cpu)); 8776 /* 8777 * At this point the CPU should be unrealized and removed 8778 * from cpu lists. We can clean-up the rest of the thread 8779 * data without the lock held. 8780 */ 8781 8782 pthread_mutex_unlock(&clone_lock); 8783 8784 if (ts->child_tidptr) { 8785 put_user_u32(0, ts->child_tidptr); 8786 do_sys_futex(g2h(cpu, ts->child_tidptr), 8787 FUTEX_WAKE, INT_MAX, NULL, NULL, 0); 8788 } 8789 thread_cpu = NULL; 8790 g_free(ts); 8791 rcu_unregister_thread(); 8792 pthread_exit(NULL); 8793 } 8794 8795 pthread_mutex_unlock(&clone_lock); 8796 preexit_cleanup(cpu_env, arg1); 8797 _exit(arg1); 8798 return 0; /* avoid warning */ 8799 case TARGET_NR_read: 8800 if (arg2 == 0 && arg3 == 0) { 8801 return get_errno(safe_read(arg1, 0, 0)); 8802 } else { 8803 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8804 return -TARGET_EFAULT; 8805 ret = get_errno(safe_read(arg1, p, arg3)); 8806 if (ret >= 0 && 8807 fd_trans_host_to_target_data(arg1)) { 8808 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8809 } 8810 unlock_user(p, arg2, ret); 8811 } 8812 return ret; 8813 case TARGET_NR_write: 8814 if (arg2 == 0 && arg3 == 0) { 8815 return get_errno(safe_write(arg1, 0, 0)); 8816 } 8817 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8818 return -TARGET_EFAULT; 8819 if (fd_trans_target_to_host_data(arg1)) { 8820 void *copy = g_malloc(arg3); 8821 memcpy(copy, p, arg3); 8822 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8823 if (ret >= 0) { 8824 ret = get_errno(safe_write(arg1, copy, ret)); 8825 } 8826 g_free(copy); 8827 } else { 8828 ret = get_errno(safe_write(arg1, p, arg3)); 8829 } 8830 unlock_user(p, arg2, 0); 8831 return ret; 8832 8833 #ifdef TARGET_NR_open 8834 case TARGET_NR_open: 
8835 if (!(p = lock_user_string(arg1))) 8836 return -TARGET_EFAULT; 8837 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8838 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8839 arg3)); 8840 fd_trans_unregister(ret); 8841 unlock_user(p, arg1, 0); 8842 return ret; 8843 #endif 8844 case TARGET_NR_openat: 8845 if (!(p = lock_user_string(arg2))) 8846 return -TARGET_EFAULT; 8847 ret = get_errno(do_openat(cpu_env, arg1, p, 8848 target_to_host_bitmask(arg3, fcntl_flags_tbl), 8849 arg4)); 8850 fd_trans_unregister(ret); 8851 unlock_user(p, arg2, 0); 8852 return ret; 8853 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8854 case TARGET_NR_name_to_handle_at: 8855 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8856 return ret; 8857 #endif 8858 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8859 case TARGET_NR_open_by_handle_at: 8860 ret = do_open_by_handle_at(arg1, arg2, arg3); 8861 fd_trans_unregister(ret); 8862 return ret; 8863 #endif 8864 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) 8865 case TARGET_NR_pidfd_open: 8866 return get_errno(pidfd_open(arg1, arg2)); 8867 #endif 8868 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) 8869 case TARGET_NR_pidfd_send_signal: 8870 { 8871 siginfo_t uinfo, *puinfo; 8872 8873 if (arg3) { 8874 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 8875 if (!p) { 8876 return -TARGET_EFAULT; 8877 } 8878 target_to_host_siginfo(&uinfo, p); 8879 unlock_user(p, arg3, 0); 8880 puinfo = &uinfo; 8881 } else { 8882 puinfo = NULL; 8883 } 8884 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2), 8885 puinfo, arg4)); 8886 } 8887 return ret; 8888 #endif 8889 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) 8890 case TARGET_NR_pidfd_getfd: 8891 return get_errno(pidfd_getfd(arg1, arg2, arg3)); 8892 #endif 8893 case TARGET_NR_close: 8894 fd_trans_unregister(arg1); 8895 return get_errno(close(arg1)); 8896 #if 
defined(__NR_close_range) && defined(TARGET_NR_close_range) 8897 case TARGET_NR_close_range: 8898 ret = get_errno(sys_close_range(arg1, arg2, arg3)); 8899 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) { 8900 abi_long fd, maxfd; 8901 maxfd = MIN(arg2, target_fd_max); 8902 for (fd = arg1; fd < maxfd; fd++) { 8903 fd_trans_unregister(fd); 8904 } 8905 } 8906 return ret; 8907 #endif 8908 8909 case TARGET_NR_brk: 8910 return do_brk(arg1); 8911 #ifdef TARGET_NR_fork 8912 case TARGET_NR_fork: 8913 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8914 #endif 8915 #ifdef TARGET_NR_waitpid 8916 case TARGET_NR_waitpid: 8917 { 8918 int status; 8919 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8920 if (!is_error(ret) && arg2 && ret 8921 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8922 return -TARGET_EFAULT; 8923 } 8924 return ret; 8925 #endif 8926 #ifdef TARGET_NR_waitid 8927 case TARGET_NR_waitid: 8928 { 8929 siginfo_t info; 8930 info.si_pid = 0; 8931 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8932 if (!is_error(ret) && arg3 && info.si_pid != 0) { 8933 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8934 return -TARGET_EFAULT; 8935 host_to_target_siginfo(p, &info); 8936 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8937 } 8938 } 8939 return ret; 8940 #endif 8941 #ifdef TARGET_NR_creat /* not on alpha */ 8942 case TARGET_NR_creat: 8943 if (!(p = lock_user_string(arg1))) 8944 return -TARGET_EFAULT; 8945 ret = get_errno(creat(p, arg2)); 8946 fd_trans_unregister(ret); 8947 unlock_user(p, arg1, 0); 8948 return ret; 8949 #endif 8950 #ifdef TARGET_NR_link 8951 case TARGET_NR_link: 8952 { 8953 void * p2; 8954 p = lock_user_string(arg1); 8955 p2 = lock_user_string(arg2); 8956 if (!p || !p2) 8957 ret = -TARGET_EFAULT; 8958 else 8959 ret = get_errno(link(p, p2)); 8960 unlock_user(p2, arg2, 0); 8961 unlock_user(p, arg1, 0); 8962 } 8963 return ret; 8964 #endif 8965 #if defined(TARGET_NR_linkat) 8966 case 
TARGET_NR_linkat: 8967 { 8968 void * p2 = NULL; 8969 if (!arg2 || !arg4) 8970 return -TARGET_EFAULT; 8971 p = lock_user_string(arg2); 8972 p2 = lock_user_string(arg4); 8973 if (!p || !p2) 8974 ret = -TARGET_EFAULT; 8975 else 8976 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8977 unlock_user(p, arg2, 0); 8978 unlock_user(p2, arg4, 0); 8979 } 8980 return ret; 8981 #endif 8982 #ifdef TARGET_NR_unlink 8983 case TARGET_NR_unlink: 8984 if (!(p = lock_user_string(arg1))) 8985 return -TARGET_EFAULT; 8986 ret = get_errno(unlink(p)); 8987 unlock_user(p, arg1, 0); 8988 return ret; 8989 #endif 8990 #if defined(TARGET_NR_unlinkat) 8991 case TARGET_NR_unlinkat: 8992 if (!(p = lock_user_string(arg2))) 8993 return -TARGET_EFAULT; 8994 ret = get_errno(unlinkat(arg1, p, arg3)); 8995 unlock_user(p, arg2, 0); 8996 return ret; 8997 #endif 8998 case TARGET_NR_execve: 8999 { 9000 char **argp, **envp; 9001 int argc, envc; 9002 abi_ulong gp; 9003 abi_ulong guest_argp; 9004 abi_ulong guest_envp; 9005 abi_ulong addr; 9006 char **q; 9007 9008 argc = 0; 9009 guest_argp = arg2; 9010 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 9011 if (get_user_ual(addr, gp)) 9012 return -TARGET_EFAULT; 9013 if (!addr) 9014 break; 9015 argc++; 9016 } 9017 envc = 0; 9018 guest_envp = arg3; 9019 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 9020 if (get_user_ual(addr, gp)) 9021 return -TARGET_EFAULT; 9022 if (!addr) 9023 break; 9024 envc++; 9025 } 9026 9027 argp = g_new0(char *, argc + 1); 9028 envp = g_new0(char *, envc + 1); 9029 9030 for (gp = guest_argp, q = argp; gp; 9031 gp += sizeof(abi_ulong), q++) { 9032 if (get_user_ual(addr, gp)) 9033 goto execve_efault; 9034 if (!addr) 9035 break; 9036 if (!(*q = lock_user_string(addr))) 9037 goto execve_efault; 9038 } 9039 *q = NULL; 9040 9041 for (gp = guest_envp, q = envp; gp; 9042 gp += sizeof(abi_ulong), q++) { 9043 if (get_user_ual(addr, gp)) 9044 goto execve_efault; 9045 if (!addr) 9046 break; 9047 if (!(*q = lock_user_string(addr))) 9048 
goto execve_efault; 9049 } 9050 *q = NULL; 9051 9052 if (!(p = lock_user_string(arg1))) 9053 goto execve_efault; 9054 if (is_proc_myself(p, "exe")) { 9055 ret = qemu_execve(exec_path, argp, envp); 9056 } else { 9057 ret = qemu_execve(p, argp, envp); 9058 } 9059 unlock_user(p, arg1, 0); 9060 9061 goto execve_end; 9062 9063 execve_efault: 9064 ret = -TARGET_EFAULT; 9065 9066 execve_end: 9067 for (gp = guest_argp, q = argp; *q; 9068 gp += sizeof(abi_ulong), q++) { 9069 if (get_user_ual(addr, gp) 9070 || !addr) 9071 break; 9072 unlock_user(*q, addr, 0); 9073 } 9074 for (gp = guest_envp, q = envp; *q; 9075 gp += sizeof(abi_ulong), q++) { 9076 if (get_user_ual(addr, gp) 9077 || !addr) 9078 break; 9079 unlock_user(*q, addr, 0); 9080 } 9081 9082 g_free(argp); 9083 g_free(envp); 9084 } 9085 return ret; 9086 case TARGET_NR_chdir: 9087 if (!(p = lock_user_string(arg1))) 9088 return -TARGET_EFAULT; 9089 ret = get_errno(chdir(p)); 9090 unlock_user(p, arg1, 0); 9091 return ret; 9092 #ifdef TARGET_NR_time 9093 case TARGET_NR_time: 9094 { 9095 time_t host_time; 9096 ret = get_errno(time(&host_time)); 9097 if (!is_error(ret) 9098 && arg1 9099 && put_user_sal(host_time, arg1)) 9100 return -TARGET_EFAULT; 9101 } 9102 return ret; 9103 #endif 9104 #ifdef TARGET_NR_mknod 9105 case TARGET_NR_mknod: 9106 if (!(p = lock_user_string(arg1))) 9107 return -TARGET_EFAULT; 9108 ret = get_errno(mknod(p, arg2, arg3)); 9109 unlock_user(p, arg1, 0); 9110 return ret; 9111 #endif 9112 #if defined(TARGET_NR_mknodat) 9113 case TARGET_NR_mknodat: 9114 if (!(p = lock_user_string(arg2))) 9115 return -TARGET_EFAULT; 9116 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 9117 unlock_user(p, arg2, 0); 9118 return ret; 9119 #endif 9120 #ifdef TARGET_NR_chmod 9121 case TARGET_NR_chmod: 9122 if (!(p = lock_user_string(arg1))) 9123 return -TARGET_EFAULT; 9124 ret = get_errno(chmod(p, arg2)); 9125 unlock_user(p, arg1, 0); 9126 return ret; 9127 #endif 9128 #ifdef TARGET_NR_lseek 9129 case TARGET_NR_lseek: 9130 return 
get_errno(lseek(arg1, arg2, arg3)); 9131 #endif 9132 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 9133 /* Alpha specific */ 9134 case TARGET_NR_getxpid: 9135 cpu_env->ir[IR_A4] = getppid(); 9136 return get_errno(getpid()); 9137 #endif 9138 #ifdef TARGET_NR_getpid 9139 case TARGET_NR_getpid: 9140 return get_errno(getpid()); 9141 #endif 9142 case TARGET_NR_mount: 9143 { 9144 /* need to look at the data field */ 9145 void *p2, *p3; 9146 9147 if (arg1) { 9148 p = lock_user_string(arg1); 9149 if (!p) { 9150 return -TARGET_EFAULT; 9151 } 9152 } else { 9153 p = NULL; 9154 } 9155 9156 p2 = lock_user_string(arg2); 9157 if (!p2) { 9158 if (arg1) { 9159 unlock_user(p, arg1, 0); 9160 } 9161 return -TARGET_EFAULT; 9162 } 9163 9164 if (arg3) { 9165 p3 = lock_user_string(arg3); 9166 if (!p3) { 9167 if (arg1) { 9168 unlock_user(p, arg1, 0); 9169 } 9170 unlock_user(p2, arg2, 0); 9171 return -TARGET_EFAULT; 9172 } 9173 } else { 9174 p3 = NULL; 9175 } 9176 9177 /* FIXME - arg5 should be locked, but it isn't clear how to 9178 * do that since it's not guaranteed to be a NULL-terminated 9179 * string. 
9180 */ 9181 if (!arg5) { 9182 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 9183 } else { 9184 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5)); 9185 } 9186 ret = get_errno(ret); 9187 9188 if (arg1) { 9189 unlock_user(p, arg1, 0); 9190 } 9191 unlock_user(p2, arg2, 0); 9192 if (arg3) { 9193 unlock_user(p3, arg3, 0); 9194 } 9195 } 9196 return ret; 9197 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 9198 #if defined(TARGET_NR_umount) 9199 case TARGET_NR_umount: 9200 #endif 9201 #if defined(TARGET_NR_oldumount) 9202 case TARGET_NR_oldumount: 9203 #endif 9204 if (!(p = lock_user_string(arg1))) 9205 return -TARGET_EFAULT; 9206 ret = get_errno(umount(p)); 9207 unlock_user(p, arg1, 0); 9208 return ret; 9209 #endif 9210 #ifdef TARGET_NR_stime /* not on alpha */ 9211 case TARGET_NR_stime: 9212 { 9213 struct timespec ts; 9214 ts.tv_nsec = 0; 9215 if (get_user_sal(ts.tv_sec, arg1)) { 9216 return -TARGET_EFAULT; 9217 } 9218 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 9219 } 9220 #endif 9221 #ifdef TARGET_NR_alarm /* not on alpha */ 9222 case TARGET_NR_alarm: 9223 return alarm(arg1); 9224 #endif 9225 #ifdef TARGET_NR_pause /* not on alpha */ 9226 case TARGET_NR_pause: 9227 if (!block_signals()) { 9228 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 9229 } 9230 return -TARGET_EINTR; 9231 #endif 9232 #ifdef TARGET_NR_utime 9233 case TARGET_NR_utime: 9234 { 9235 struct utimbuf tbuf, *host_tbuf; 9236 struct target_utimbuf *target_tbuf; 9237 if (arg2) { 9238 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 9239 return -TARGET_EFAULT; 9240 tbuf.actime = tswapal(target_tbuf->actime); 9241 tbuf.modtime = tswapal(target_tbuf->modtime); 9242 unlock_user_struct(target_tbuf, arg2, 0); 9243 host_tbuf = &tbuf; 9244 } else { 9245 host_tbuf = NULL; 9246 } 9247 if (!(p = lock_user_string(arg1))) 9248 return -TARGET_EFAULT; 9249 ret = get_errno(utime(p, host_tbuf)); 9250 unlock_user(p, arg1, 0); 9251 } 9252 return ret; 9253 #endif 9254 
#ifdef TARGET_NR_utimes 9255 case TARGET_NR_utimes: 9256 { 9257 struct timeval *tvp, tv[2]; 9258 if (arg2) { 9259 if (copy_from_user_timeval(&tv[0], arg2) 9260 || copy_from_user_timeval(&tv[1], 9261 arg2 + sizeof(struct target_timeval))) 9262 return -TARGET_EFAULT; 9263 tvp = tv; 9264 } else { 9265 tvp = NULL; 9266 } 9267 if (!(p = lock_user_string(arg1))) 9268 return -TARGET_EFAULT; 9269 ret = get_errno(utimes(p, tvp)); 9270 unlock_user(p, arg1, 0); 9271 } 9272 return ret; 9273 #endif 9274 #if defined(TARGET_NR_futimesat) 9275 case TARGET_NR_futimesat: 9276 { 9277 struct timeval *tvp, tv[2]; 9278 if (arg3) { 9279 if (copy_from_user_timeval(&tv[0], arg3) 9280 || copy_from_user_timeval(&tv[1], 9281 arg3 + sizeof(struct target_timeval))) 9282 return -TARGET_EFAULT; 9283 tvp = tv; 9284 } else { 9285 tvp = NULL; 9286 } 9287 if (!(p = lock_user_string(arg2))) { 9288 return -TARGET_EFAULT; 9289 } 9290 ret = get_errno(futimesat(arg1, path(p), tvp)); 9291 unlock_user(p, arg2, 0); 9292 } 9293 return ret; 9294 #endif 9295 #ifdef TARGET_NR_access 9296 case TARGET_NR_access: 9297 if (!(p = lock_user_string(arg1))) { 9298 return -TARGET_EFAULT; 9299 } 9300 ret = get_errno(access(path(p), arg2)); 9301 unlock_user(p, arg1, 0); 9302 return ret; 9303 #endif 9304 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 9305 case TARGET_NR_faccessat: 9306 if (!(p = lock_user_string(arg2))) { 9307 return -TARGET_EFAULT; 9308 } 9309 ret = get_errno(faccessat(arg1, p, arg3, 0)); 9310 unlock_user(p, arg2, 0); 9311 return ret; 9312 #endif 9313 #if defined(TARGET_NR_faccessat2) 9314 case TARGET_NR_faccessat2: 9315 if (!(p = lock_user_string(arg2))) { 9316 return -TARGET_EFAULT; 9317 } 9318 ret = get_errno(faccessat(arg1, p, arg3, arg4)); 9319 unlock_user(p, arg2, 0); 9320 return ret; 9321 #endif 9322 #ifdef TARGET_NR_nice /* not on alpha */ 9323 case TARGET_NR_nice: 9324 return get_errno(nice(arg1)); 9325 #endif 9326 case TARGET_NR_sync: 9327 sync(); 9328 return 0; 9329 #if 
defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 9330 case TARGET_NR_syncfs: 9331 return get_errno(syncfs(arg1)); 9332 #endif 9333 case TARGET_NR_kill: 9334 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 9335 #ifdef TARGET_NR_rename 9336 case TARGET_NR_rename: 9337 { 9338 void *p2; 9339 p = lock_user_string(arg1); 9340 p2 = lock_user_string(arg2); 9341 if (!p || !p2) 9342 ret = -TARGET_EFAULT; 9343 else 9344 ret = get_errno(rename(p, p2)); 9345 unlock_user(p2, arg2, 0); 9346 unlock_user(p, arg1, 0); 9347 } 9348 return ret; 9349 #endif 9350 #if defined(TARGET_NR_renameat) 9351 case TARGET_NR_renameat: 9352 { 9353 void *p2; 9354 p = lock_user_string(arg2); 9355 p2 = lock_user_string(arg4); 9356 if (!p || !p2) 9357 ret = -TARGET_EFAULT; 9358 else 9359 ret = get_errno(renameat(arg1, p, arg3, p2)); 9360 unlock_user(p2, arg4, 0); 9361 unlock_user(p, arg2, 0); 9362 } 9363 return ret; 9364 #endif 9365 #if defined(TARGET_NR_renameat2) 9366 case TARGET_NR_renameat2: 9367 { 9368 void *p2; 9369 p = lock_user_string(arg2); 9370 p2 = lock_user_string(arg4); 9371 if (!p || !p2) { 9372 ret = -TARGET_EFAULT; 9373 } else { 9374 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 9375 } 9376 unlock_user(p2, arg4, 0); 9377 unlock_user(p, arg2, 0); 9378 } 9379 return ret; 9380 #endif 9381 #ifdef TARGET_NR_mkdir 9382 case TARGET_NR_mkdir: 9383 if (!(p = lock_user_string(arg1))) 9384 return -TARGET_EFAULT; 9385 ret = get_errno(mkdir(p, arg2)); 9386 unlock_user(p, arg1, 0); 9387 return ret; 9388 #endif 9389 #if defined(TARGET_NR_mkdirat) 9390 case TARGET_NR_mkdirat: 9391 if (!(p = lock_user_string(arg2))) 9392 return -TARGET_EFAULT; 9393 ret = get_errno(mkdirat(arg1, p, arg3)); 9394 unlock_user(p, arg2, 0); 9395 return ret; 9396 #endif 9397 #ifdef TARGET_NR_rmdir 9398 case TARGET_NR_rmdir: 9399 if (!(p = lock_user_string(arg1))) 9400 return -TARGET_EFAULT; 9401 ret = get_errno(rmdir(p)); 9402 unlock_user(p, arg1, 0); 9403 return ret; 9404 #endif 9405 case 
TARGET_NR_dup: 9406 ret = get_errno(dup(arg1)); 9407 if (ret >= 0) { 9408 fd_trans_dup(arg1, ret); 9409 } 9410 return ret; 9411 #ifdef TARGET_NR_pipe 9412 case TARGET_NR_pipe: 9413 return do_pipe(cpu_env, arg1, 0, 0); 9414 #endif 9415 #ifdef TARGET_NR_pipe2 9416 case TARGET_NR_pipe2: 9417 return do_pipe(cpu_env, arg1, 9418 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 9419 #endif 9420 case TARGET_NR_times: 9421 { 9422 struct target_tms *tmsp; 9423 struct tms tms; 9424 ret = get_errno(times(&tms)); 9425 if (arg1) { 9426 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 9427 if (!tmsp) 9428 return -TARGET_EFAULT; 9429 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 9430 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 9431 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 9432 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 9433 } 9434 if (!is_error(ret)) 9435 ret = host_to_target_clock_t(ret); 9436 } 9437 return ret; 9438 case TARGET_NR_acct: 9439 if (arg1 == 0) { 9440 ret = get_errno(acct(NULL)); 9441 } else { 9442 if (!(p = lock_user_string(arg1))) { 9443 return -TARGET_EFAULT; 9444 } 9445 ret = get_errno(acct(path(p))); 9446 unlock_user(p, arg1, 0); 9447 } 9448 return ret; 9449 #ifdef TARGET_NR_umount2 9450 case TARGET_NR_umount2: 9451 if (!(p = lock_user_string(arg1))) 9452 return -TARGET_EFAULT; 9453 ret = get_errno(umount2(p, arg2)); 9454 unlock_user(p, arg1, 0); 9455 return ret; 9456 #endif 9457 case TARGET_NR_ioctl: 9458 return do_ioctl(arg1, arg2, arg3); 9459 #ifdef TARGET_NR_fcntl 9460 case TARGET_NR_fcntl: 9461 return do_fcntl(arg1, arg2, arg3); 9462 #endif 9463 case TARGET_NR_setpgid: 9464 return get_errno(setpgid(arg1, arg2)); 9465 case TARGET_NR_umask: 9466 return get_errno(umask(arg1)); 9467 case TARGET_NR_chroot: 9468 if (!(p = lock_user_string(arg1))) 9469 return -TARGET_EFAULT; 9470 ret = get_errno(chroot(p)); 9471 unlock_user(p, arg1, 0); 9472 return 
ret; 9473 #ifdef TARGET_NR_dup2 9474 case TARGET_NR_dup2: 9475 ret = get_errno(dup2(arg1, arg2)); 9476 if (ret >= 0) { 9477 fd_trans_dup(arg1, arg2); 9478 } 9479 return ret; 9480 #endif 9481 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 9482 case TARGET_NR_dup3: 9483 { 9484 int host_flags; 9485 9486 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 9487 return -EINVAL; 9488 } 9489 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 9490 ret = get_errno(dup3(arg1, arg2, host_flags)); 9491 if (ret >= 0) { 9492 fd_trans_dup(arg1, arg2); 9493 } 9494 return ret; 9495 } 9496 #endif 9497 #ifdef TARGET_NR_getppid /* not on alpha */ 9498 case TARGET_NR_getppid: 9499 return get_errno(getppid()); 9500 #endif 9501 #ifdef TARGET_NR_getpgrp 9502 case TARGET_NR_getpgrp: 9503 return get_errno(getpgrp()); 9504 #endif 9505 case TARGET_NR_setsid: 9506 return get_errno(setsid()); 9507 #ifdef TARGET_NR_sigaction 9508 case TARGET_NR_sigaction: 9509 { 9510 #if defined(TARGET_MIPS) 9511 struct target_sigaction act, oact, *pact, *old_act; 9512 9513 if (arg2) { 9514 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9515 return -TARGET_EFAULT; 9516 act._sa_handler = old_act->_sa_handler; 9517 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 9518 act.sa_flags = old_act->sa_flags; 9519 unlock_user_struct(old_act, arg2, 0); 9520 pact = &act; 9521 } else { 9522 pact = NULL; 9523 } 9524 9525 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9526 9527 if (!is_error(ret) && arg3) { 9528 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9529 return -TARGET_EFAULT; 9530 old_act->_sa_handler = oact._sa_handler; 9531 old_act->sa_flags = oact.sa_flags; 9532 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 9533 old_act->sa_mask.sig[1] = 0; 9534 old_act->sa_mask.sig[2] = 0; 9535 old_act->sa_mask.sig[3] = 0; 9536 unlock_user_struct(old_act, arg3, 1); 9537 } 9538 #else 9539 struct target_old_sigaction *old_act; 9540 struct target_sigaction act, oact, *pact; 9541 if (arg2) { 9542 if 
(!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9543 return -TARGET_EFAULT; 9544 act._sa_handler = old_act->_sa_handler; 9545 target_siginitset(&act.sa_mask, old_act->sa_mask); 9546 act.sa_flags = old_act->sa_flags; 9547 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9548 act.sa_restorer = old_act->sa_restorer; 9549 #endif 9550 unlock_user_struct(old_act, arg2, 0); 9551 pact = &act; 9552 } else { 9553 pact = NULL; 9554 } 9555 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9556 if (!is_error(ret) && arg3) { 9557 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9558 return -TARGET_EFAULT; 9559 old_act->_sa_handler = oact._sa_handler; 9560 old_act->sa_mask = oact.sa_mask.sig[0]; 9561 old_act->sa_flags = oact.sa_flags; 9562 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9563 old_act->sa_restorer = oact.sa_restorer; 9564 #endif 9565 unlock_user_struct(old_act, arg3, 1); 9566 } 9567 #endif 9568 } 9569 return ret; 9570 #endif 9571 case TARGET_NR_rt_sigaction: 9572 { 9573 /* 9574 * For Alpha and SPARC this is a 5 argument syscall, with 9575 * a 'restorer' parameter which must be copied into the 9576 * sa_restorer field of the sigaction struct. 9577 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 9578 * and arg5 is the sigsetsize. 
9579 */ 9580 #if defined(TARGET_ALPHA) 9581 target_ulong sigsetsize = arg4; 9582 target_ulong restorer = arg5; 9583 #elif defined(TARGET_SPARC) 9584 target_ulong restorer = arg4; 9585 target_ulong sigsetsize = arg5; 9586 #else 9587 target_ulong sigsetsize = arg4; 9588 target_ulong restorer = 0; 9589 #endif 9590 struct target_sigaction *act = NULL; 9591 struct target_sigaction *oact = NULL; 9592 9593 if (sigsetsize != sizeof(target_sigset_t)) { 9594 return -TARGET_EINVAL; 9595 } 9596 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9597 return -TARGET_EFAULT; 9598 } 9599 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9600 ret = -TARGET_EFAULT; 9601 } else { 9602 ret = get_errno(do_sigaction(arg1, act, oact, restorer)); 9603 if (oact) { 9604 unlock_user_struct(oact, arg3, 1); 9605 } 9606 } 9607 if (act) { 9608 unlock_user_struct(act, arg2, 0); 9609 } 9610 } 9611 return ret; 9612 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9613 case TARGET_NR_sgetmask: 9614 { 9615 sigset_t cur_set; 9616 abi_ulong target_set; 9617 ret = do_sigprocmask(0, NULL, &cur_set); 9618 if (!ret) { 9619 host_to_target_old_sigset(&target_set, &cur_set); 9620 ret = target_set; 9621 } 9622 } 9623 return ret; 9624 #endif 9625 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9626 case TARGET_NR_ssetmask: 9627 { 9628 sigset_t set, oset; 9629 abi_ulong target_set = arg1; 9630 target_to_host_old_sigset(&set, &target_set); 9631 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9632 if (!ret) { 9633 host_to_target_old_sigset(&target_set, &oset); 9634 ret = target_set; 9635 } 9636 } 9637 return ret; 9638 #endif 9639 #ifdef TARGET_NR_sigprocmask 9640 case TARGET_NR_sigprocmask: 9641 { 9642 #if defined(TARGET_ALPHA) 9643 sigset_t set, oldset; 9644 abi_ulong mask; 9645 int how; 9646 9647 switch (arg1) { 9648 case TARGET_SIG_BLOCK: 9649 how = SIG_BLOCK; 9650 break; 9651 case TARGET_SIG_UNBLOCK: 9652 how = SIG_UNBLOCK; 9653 break; 9654 case TARGET_SIG_SETMASK: 9655 how = SIG_SETMASK; 9656 
break; 9657 default: 9658 return -TARGET_EINVAL; 9659 } 9660 mask = arg2; 9661 target_to_host_old_sigset(&set, &mask); 9662 9663 ret = do_sigprocmask(how, &set, &oldset); 9664 if (!is_error(ret)) { 9665 host_to_target_old_sigset(&mask, &oldset); 9666 ret = mask; 9667 cpu_env->ir[IR_V0] = 0; /* force no error */ 9668 } 9669 #else 9670 sigset_t set, oldset, *set_ptr; 9671 int how; 9672 9673 if (arg2) { 9674 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9675 if (!p) { 9676 return -TARGET_EFAULT; 9677 } 9678 target_to_host_old_sigset(&set, p); 9679 unlock_user(p, arg2, 0); 9680 set_ptr = &set; 9681 switch (arg1) { 9682 case TARGET_SIG_BLOCK: 9683 how = SIG_BLOCK; 9684 break; 9685 case TARGET_SIG_UNBLOCK: 9686 how = SIG_UNBLOCK; 9687 break; 9688 case TARGET_SIG_SETMASK: 9689 how = SIG_SETMASK; 9690 break; 9691 default: 9692 return -TARGET_EINVAL; 9693 } 9694 } else { 9695 how = 0; 9696 set_ptr = NULL; 9697 } 9698 ret = do_sigprocmask(how, set_ptr, &oldset); 9699 if (!is_error(ret) && arg3) { 9700 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9701 return -TARGET_EFAULT; 9702 host_to_target_old_sigset(p, &oldset); 9703 unlock_user(p, arg3, sizeof(target_sigset_t)); 9704 } 9705 #endif 9706 } 9707 return ret; 9708 #endif 9709 case TARGET_NR_rt_sigprocmask: 9710 { 9711 int how = arg1; 9712 sigset_t set, oldset, *set_ptr; 9713 9714 if (arg4 != sizeof(target_sigset_t)) { 9715 return -TARGET_EINVAL; 9716 } 9717 9718 if (arg2) { 9719 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9720 if (!p) { 9721 return -TARGET_EFAULT; 9722 } 9723 target_to_host_sigset(&set, p); 9724 unlock_user(p, arg2, 0); 9725 set_ptr = &set; 9726 switch(how) { 9727 case TARGET_SIG_BLOCK: 9728 how = SIG_BLOCK; 9729 break; 9730 case TARGET_SIG_UNBLOCK: 9731 how = SIG_UNBLOCK; 9732 break; 9733 case TARGET_SIG_SETMASK: 9734 how = SIG_SETMASK; 9735 break; 9736 default: 9737 return -TARGET_EINVAL; 9738 } 9739 } else { 9740 how = 0; 9741 set_ptr = NULL; 9742 
} 9743 ret = do_sigprocmask(how, set_ptr, &oldset); 9744 if (!is_error(ret) && arg3) { 9745 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9746 return -TARGET_EFAULT; 9747 host_to_target_sigset(p, &oldset); 9748 unlock_user(p, arg3, sizeof(target_sigset_t)); 9749 } 9750 } 9751 return ret; 9752 #ifdef TARGET_NR_sigpending 9753 case TARGET_NR_sigpending: 9754 { 9755 sigset_t set; 9756 ret = get_errno(sigpending(&set)); 9757 if (!is_error(ret)) { 9758 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9759 return -TARGET_EFAULT; 9760 host_to_target_old_sigset(p, &set); 9761 unlock_user(p, arg1, sizeof(target_sigset_t)); 9762 } 9763 } 9764 return ret; 9765 #endif 9766 case TARGET_NR_rt_sigpending: 9767 { 9768 sigset_t set; 9769 9770 /* Yes, this check is >, not != like most. We follow the kernel's 9771 * logic and it does it like this because it implements 9772 * NR_sigpending through the same code path, and in that case 9773 * the old_sigset_t is smaller in size. 
9774 */ 9775 if (arg2 > sizeof(target_sigset_t)) { 9776 return -TARGET_EINVAL; 9777 } 9778 9779 ret = get_errno(sigpending(&set)); 9780 if (!is_error(ret)) { 9781 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9782 return -TARGET_EFAULT; 9783 host_to_target_sigset(p, &set); 9784 unlock_user(p, arg1, sizeof(target_sigset_t)); 9785 } 9786 } 9787 return ret; 9788 #ifdef TARGET_NR_sigsuspend 9789 case TARGET_NR_sigsuspend: 9790 { 9791 sigset_t *set; 9792 9793 #if defined(TARGET_ALPHA) 9794 TaskState *ts = cpu->opaque; 9795 /* target_to_host_old_sigset will bswap back */ 9796 abi_ulong mask = tswapal(arg1); 9797 set = &ts->sigsuspend_mask; 9798 target_to_host_old_sigset(set, &mask); 9799 #else 9800 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t)); 9801 if (ret != 0) { 9802 return ret; 9803 } 9804 #endif 9805 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9806 finish_sigsuspend_mask(ret); 9807 } 9808 return ret; 9809 #endif 9810 case TARGET_NR_rt_sigsuspend: 9811 { 9812 sigset_t *set; 9813 9814 ret = process_sigsuspend_mask(&set, arg1, arg2); 9815 if (ret != 0) { 9816 return ret; 9817 } 9818 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9819 finish_sigsuspend_mask(ret); 9820 } 9821 return ret; 9822 #ifdef TARGET_NR_rt_sigtimedwait 9823 case TARGET_NR_rt_sigtimedwait: 9824 { 9825 sigset_t set; 9826 struct timespec uts, *puts; 9827 siginfo_t uinfo; 9828 9829 if (arg4 != sizeof(target_sigset_t)) { 9830 return -TARGET_EINVAL; 9831 } 9832 9833 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9834 return -TARGET_EFAULT; 9835 target_to_host_sigset(&set, p); 9836 unlock_user(p, arg1, 0); 9837 if (arg3) { 9838 puts = &uts; 9839 if (target_to_host_timespec(puts, arg3)) { 9840 return -TARGET_EFAULT; 9841 } 9842 } else { 9843 puts = NULL; 9844 } 9845 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9846 SIGSET_T_SIZE)); 9847 if (!is_error(ret)) { 9848 if (arg2) { 9849 p = 
lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9850 0); 9851 if (!p) { 9852 return -TARGET_EFAULT; 9853 } 9854 host_to_target_siginfo(p, &uinfo); 9855 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9856 } 9857 ret = host_to_target_signal(ret); 9858 } 9859 } 9860 return ret; 9861 #endif 9862 #ifdef TARGET_NR_rt_sigtimedwait_time64 9863 case TARGET_NR_rt_sigtimedwait_time64: 9864 { 9865 sigset_t set; 9866 struct timespec uts, *puts; 9867 siginfo_t uinfo; 9868 9869 if (arg4 != sizeof(target_sigset_t)) { 9870 return -TARGET_EINVAL; 9871 } 9872 9873 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 9874 if (!p) { 9875 return -TARGET_EFAULT; 9876 } 9877 target_to_host_sigset(&set, p); 9878 unlock_user(p, arg1, 0); 9879 if (arg3) { 9880 puts = &uts; 9881 if (target_to_host_timespec64(puts, arg3)) { 9882 return -TARGET_EFAULT; 9883 } 9884 } else { 9885 puts = NULL; 9886 } 9887 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9888 SIGSET_T_SIZE)); 9889 if (!is_error(ret)) { 9890 if (arg2) { 9891 p = lock_user(VERIFY_WRITE, arg2, 9892 sizeof(target_siginfo_t), 0); 9893 if (!p) { 9894 return -TARGET_EFAULT; 9895 } 9896 host_to_target_siginfo(p, &uinfo); 9897 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9898 } 9899 ret = host_to_target_signal(ret); 9900 } 9901 } 9902 return ret; 9903 #endif 9904 case TARGET_NR_rt_sigqueueinfo: 9905 { 9906 siginfo_t uinfo; 9907 9908 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9909 if (!p) { 9910 return -TARGET_EFAULT; 9911 } 9912 target_to_host_siginfo(&uinfo, p); 9913 unlock_user(p, arg3, 0); 9914 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo)); 9915 } 9916 return ret; 9917 case TARGET_NR_rt_tgsigqueueinfo: 9918 { 9919 siginfo_t uinfo; 9920 9921 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9922 if (!p) { 9923 return -TARGET_EFAULT; 9924 } 9925 target_to_host_siginfo(&uinfo, p); 9926 unlock_user(p, arg4, 0); 9927 ret = 
get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo)); 9928 } 9929 return ret; 9930 #ifdef TARGET_NR_sigreturn 9931 case TARGET_NR_sigreturn: 9932 if (block_signals()) { 9933 return -QEMU_ERESTARTSYS; 9934 } 9935 return do_sigreturn(cpu_env); 9936 #endif 9937 case TARGET_NR_rt_sigreturn: 9938 if (block_signals()) { 9939 return -QEMU_ERESTARTSYS; 9940 } 9941 return do_rt_sigreturn(cpu_env); 9942 case TARGET_NR_sethostname: 9943 if (!(p = lock_user_string(arg1))) 9944 return -TARGET_EFAULT; 9945 ret = get_errno(sethostname(p, arg2)); 9946 unlock_user(p, arg1, 0); 9947 return ret; 9948 #ifdef TARGET_NR_setrlimit 9949 case TARGET_NR_setrlimit: 9950 { 9951 int resource = target_to_host_resource(arg1); 9952 struct target_rlimit *target_rlim; 9953 struct rlimit rlim; 9954 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9955 return -TARGET_EFAULT; 9956 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9957 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9958 unlock_user_struct(target_rlim, arg2, 0); 9959 /* 9960 * If we just passed through resource limit settings for memory then 9961 * they would also apply to QEMU's own allocations, and QEMU will 9962 * crash or hang or die if its allocations fail. Ideally we would 9963 * track the guest allocations in QEMU and apply the limits ourselves. 9964 * For now, just tell the guest the call succeeded but don't actually 9965 * limit anything. 
9966 */ 9967 if (resource != RLIMIT_AS && 9968 resource != RLIMIT_DATA && 9969 resource != RLIMIT_STACK) { 9970 return get_errno(setrlimit(resource, &rlim)); 9971 } else { 9972 return 0; 9973 } 9974 } 9975 #endif 9976 #ifdef TARGET_NR_getrlimit 9977 case TARGET_NR_getrlimit: 9978 { 9979 int resource = target_to_host_resource(arg1); 9980 struct target_rlimit *target_rlim; 9981 struct rlimit rlim; 9982 9983 ret = get_errno(getrlimit(resource, &rlim)); 9984 if (!is_error(ret)) { 9985 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9986 return -TARGET_EFAULT; 9987 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9988 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9989 unlock_user_struct(target_rlim, arg2, 1); 9990 } 9991 } 9992 return ret; 9993 #endif 9994 case TARGET_NR_getrusage: 9995 { 9996 struct rusage rusage; 9997 ret = get_errno(getrusage(arg1, &rusage)); 9998 if (!is_error(ret)) { 9999 ret = host_to_target_rusage(arg2, &rusage); 10000 } 10001 } 10002 return ret; 10003 #if defined(TARGET_NR_gettimeofday) 10004 case TARGET_NR_gettimeofday: 10005 { 10006 struct timeval tv; 10007 struct timezone tz; 10008 10009 ret = get_errno(gettimeofday(&tv, &tz)); 10010 if (!is_error(ret)) { 10011 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 10012 return -TARGET_EFAULT; 10013 } 10014 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 10015 return -TARGET_EFAULT; 10016 } 10017 } 10018 } 10019 return ret; 10020 #endif 10021 #if defined(TARGET_NR_settimeofday) 10022 case TARGET_NR_settimeofday: 10023 { 10024 struct timeval tv, *ptv = NULL; 10025 struct timezone tz, *ptz = NULL; 10026 10027 if (arg1) { 10028 if (copy_from_user_timeval(&tv, arg1)) { 10029 return -TARGET_EFAULT; 10030 } 10031 ptv = &tv; 10032 } 10033 10034 if (arg2) { 10035 if (copy_from_user_timezone(&tz, arg2)) { 10036 return -TARGET_EFAULT; 10037 } 10038 ptz = &tz; 10039 } 10040 10041 return get_errno(settimeofday(ptv, ptz)); 10042 } 10043 #endif 10044 #if 
defined(TARGET_NR_select) 10045 case TARGET_NR_select: 10046 #if defined(TARGET_WANT_NI_OLD_SELECT) 10047 /* some architectures used to have old_select here 10048 * but now ENOSYS it. 10049 */ 10050 ret = -TARGET_ENOSYS; 10051 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 10052 ret = do_old_select(arg1); 10053 #else 10054 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10055 #endif 10056 return ret; 10057 #endif 10058 #ifdef TARGET_NR_pselect6 10059 case TARGET_NR_pselect6: 10060 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 10061 #endif 10062 #ifdef TARGET_NR_pselect6_time64 10063 case TARGET_NR_pselect6_time64: 10064 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 10065 #endif 10066 #ifdef TARGET_NR_symlink 10067 case TARGET_NR_symlink: 10068 { 10069 void *p2; 10070 p = lock_user_string(arg1); 10071 p2 = lock_user_string(arg2); 10072 if (!p || !p2) 10073 ret = -TARGET_EFAULT; 10074 else 10075 ret = get_errno(symlink(p, p2)); 10076 unlock_user(p2, arg2, 0); 10077 unlock_user(p, arg1, 0); 10078 } 10079 return ret; 10080 #endif 10081 #if defined(TARGET_NR_symlinkat) 10082 case TARGET_NR_symlinkat: 10083 { 10084 void *p2; 10085 p = lock_user_string(arg1); 10086 p2 = lock_user_string(arg3); 10087 if (!p || !p2) 10088 ret = -TARGET_EFAULT; 10089 else 10090 ret = get_errno(symlinkat(p, arg2, p2)); 10091 unlock_user(p2, arg3, 0); 10092 unlock_user(p, arg1, 0); 10093 } 10094 return ret; 10095 #endif 10096 #ifdef TARGET_NR_readlink 10097 case TARGET_NR_readlink: 10098 { 10099 void *p2; 10100 p = lock_user_string(arg1); 10101 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10102 if (!p || !p2) { 10103 ret = -TARGET_EFAULT; 10104 } else if (!arg3) { 10105 /* Short circuit this for the magic exe check. */ 10106 ret = -TARGET_EINVAL; 10107 } else if (is_proc_myself((const char *)p, "exe")) { 10108 char real[PATH_MAX], *temp; 10109 temp = realpath(exec_path, real); 10110 /* Return value is # of bytes that we wrote to the buffer. 
*/ 10111 if (temp == NULL) { 10112 ret = get_errno(-1); 10113 } else { 10114 /* Don't worry about sign mismatch as earlier mapping 10115 * logic would have thrown a bad address error. */ 10116 ret = MIN(strlen(real), arg3); 10117 /* We cannot NUL terminate the string. */ 10118 memcpy(p2, real, ret); 10119 } 10120 } else { 10121 ret = get_errno(readlink(path(p), p2, arg3)); 10122 } 10123 unlock_user(p2, arg2, ret); 10124 unlock_user(p, arg1, 0); 10125 } 10126 return ret; 10127 #endif 10128 #if defined(TARGET_NR_readlinkat) 10129 case TARGET_NR_readlinkat: 10130 { 10131 void *p2; 10132 p = lock_user_string(arg2); 10133 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 10134 if (!p || !p2) { 10135 ret = -TARGET_EFAULT; 10136 } else if (!arg4) { 10137 /* Short circuit this for the magic exe check. */ 10138 ret = -TARGET_EINVAL; 10139 } else if (is_proc_myself((const char *)p, "exe")) { 10140 char real[PATH_MAX], *temp; 10141 temp = realpath(exec_path, real); 10142 /* Return value is # of bytes that we wrote to the buffer. */ 10143 if (temp == NULL) { 10144 ret = get_errno(-1); 10145 } else { 10146 /* Don't worry about sign mismatch as earlier mapping 10147 * logic would have thrown a bad address error. */ 10148 ret = MIN(strlen(real), arg4); 10149 /* We cannot NUL terminate the string. 
*/ 10150 memcpy(p2, real, ret); 10151 } 10152 } else { 10153 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 10154 } 10155 unlock_user(p2, arg3, ret); 10156 unlock_user(p, arg2, 0); 10157 } 10158 return ret; 10159 #endif 10160 #ifdef TARGET_NR_swapon 10161 case TARGET_NR_swapon: 10162 if (!(p = lock_user_string(arg1))) 10163 return -TARGET_EFAULT; 10164 ret = get_errno(swapon(p, arg2)); 10165 unlock_user(p, arg1, 0); 10166 return ret; 10167 #endif 10168 case TARGET_NR_reboot: 10169 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 10170 /* arg4 must be ignored in all other cases */ 10171 p = lock_user_string(arg4); 10172 if (!p) { 10173 return -TARGET_EFAULT; 10174 } 10175 ret = get_errno(reboot(arg1, arg2, arg3, p)); 10176 unlock_user(p, arg4, 0); 10177 } else { 10178 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 10179 } 10180 return ret; 10181 #ifdef TARGET_NR_mmap 10182 case TARGET_NR_mmap: 10183 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 10184 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 10185 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 10186 || defined(TARGET_S390X) 10187 { 10188 abi_ulong *v; 10189 abi_ulong v1, v2, v3, v4, v5, v6; 10190 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 10191 return -TARGET_EFAULT; 10192 v1 = tswapal(v[0]); 10193 v2 = tswapal(v[1]); 10194 v3 = tswapal(v[2]); 10195 v4 = tswapal(v[3]); 10196 v5 = tswapal(v[4]); 10197 v6 = tswapal(v[5]); 10198 unlock_user(v, arg1, 0); 10199 ret = get_errno(target_mmap(v1, v2, v3, 10200 target_to_host_bitmask(v4, mmap_flags_tbl), 10201 v5, v6)); 10202 } 10203 #else 10204 /* mmap pointers are always untagged */ 10205 ret = get_errno(target_mmap(arg1, arg2, arg3, 10206 target_to_host_bitmask(arg4, mmap_flags_tbl), 10207 arg5, 10208 arg6)); 10209 #endif 10210 return ret; 10211 #endif 10212 #ifdef TARGET_NR_mmap2 10213 case TARGET_NR_mmap2: 10214 #ifndef MMAP_SHIFT 10215 #define MMAP_SHIFT 12 10216 #endif 10217 ret = 
target_mmap(arg1, arg2, arg3, 10218 target_to_host_bitmask(arg4, mmap_flags_tbl), 10219 arg5, arg6 << MMAP_SHIFT); 10220 return get_errno(ret); 10221 #endif 10222 case TARGET_NR_munmap: 10223 arg1 = cpu_untagged_addr(cpu, arg1); 10224 return get_errno(target_munmap(arg1, arg2)); 10225 case TARGET_NR_mprotect: 10226 arg1 = cpu_untagged_addr(cpu, arg1); 10227 { 10228 TaskState *ts = cpu->opaque; 10229 /* Special hack to detect libc making the stack executable. */ 10230 if ((arg3 & PROT_GROWSDOWN) 10231 && arg1 >= ts->info->stack_limit 10232 && arg1 <= ts->info->start_stack) { 10233 arg3 &= ~PROT_GROWSDOWN; 10234 arg2 = arg2 + arg1 - ts->info->stack_limit; 10235 arg1 = ts->info->stack_limit; 10236 } 10237 } 10238 return get_errno(target_mprotect(arg1, arg2, arg3)); 10239 #ifdef TARGET_NR_mremap 10240 case TARGET_NR_mremap: 10241 arg1 = cpu_untagged_addr(cpu, arg1); 10242 /* mremap new_addr (arg5) is always untagged */ 10243 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 10244 #endif 10245 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 10246 #ifdef TARGET_NR_msync 10247 case TARGET_NR_msync: 10248 return get_errno(msync(g2h(cpu, arg1), arg2, arg3)); 10249 #endif 10250 #ifdef TARGET_NR_mlock 10251 case TARGET_NR_mlock: 10252 return get_errno(mlock(g2h(cpu, arg1), arg2)); 10253 #endif 10254 #ifdef TARGET_NR_munlock 10255 case TARGET_NR_munlock: 10256 return get_errno(munlock(g2h(cpu, arg1), arg2)); 10257 #endif 10258 #ifdef TARGET_NR_mlockall 10259 case TARGET_NR_mlockall: 10260 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 10261 #endif 10262 #ifdef TARGET_NR_munlockall 10263 case TARGET_NR_munlockall: 10264 return get_errno(munlockall()); 10265 #endif 10266 #ifdef TARGET_NR_truncate 10267 case TARGET_NR_truncate: 10268 if (!(p = lock_user_string(arg1))) 10269 return -TARGET_EFAULT; 10270 ret = get_errno(truncate(p, arg2)); 10271 unlock_user(p, arg1, 0); 10272 return ret; 10273 #endif 10274 #ifdef TARGET_NR_ftruncate 10275 case TARGET_NR_ftruncate: 10276 return get_errno(ftruncate(arg1, arg2)); 10277 #endif 10278 case TARGET_NR_fchmod: 10279 return get_errno(fchmod(arg1, arg2)); 10280 #if defined(TARGET_NR_fchmodat) 10281 case TARGET_NR_fchmodat: 10282 if (!(p = lock_user_string(arg2))) 10283 return -TARGET_EFAULT; 10284 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 10285 unlock_user(p, arg2, 0); 10286 return ret; 10287 #endif 10288 case TARGET_NR_getpriority: 10289 /* Note that negative values are valid for getpriority, so we must 10290 differentiate based on errno settings. */ 10291 errno = 0; 10292 ret = getpriority(arg1, arg2); 10293 if (ret == -1 && errno != 0) { 10294 return -host_to_target_errno(errno); 10295 } 10296 #ifdef TARGET_ALPHA 10297 /* Return value is the unbiased priority. Signal no error. */ 10298 cpu_env->ir[IR_V0] = 0; 10299 #else 10300 /* Return value is a biased priority to avoid negative numbers. 
*/ 10301 ret = 20 - ret; 10302 #endif 10303 return ret; 10304 case TARGET_NR_setpriority: 10305 return get_errno(setpriority(arg1, arg2, arg3)); 10306 #ifdef TARGET_NR_statfs 10307 case TARGET_NR_statfs: 10308 if (!(p = lock_user_string(arg1))) { 10309 return -TARGET_EFAULT; 10310 } 10311 ret = get_errno(statfs(path(p), &stfs)); 10312 unlock_user(p, arg1, 0); 10313 convert_statfs: 10314 if (!is_error(ret)) { 10315 struct target_statfs *target_stfs; 10316 10317 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 10318 return -TARGET_EFAULT; 10319 __put_user(stfs.f_type, &target_stfs->f_type); 10320 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10321 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10322 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10323 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10324 __put_user(stfs.f_files, &target_stfs->f_files); 10325 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10326 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10327 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10328 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10329 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10330 #ifdef _STATFS_F_FLAGS 10331 __put_user(stfs.f_flags, &target_stfs->f_flags); 10332 #else 10333 __put_user(0, &target_stfs->f_flags); 10334 #endif 10335 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10336 unlock_user_struct(target_stfs, arg2, 1); 10337 } 10338 return ret; 10339 #endif 10340 #ifdef TARGET_NR_fstatfs 10341 case TARGET_NR_fstatfs: 10342 ret = get_errno(fstatfs(arg1, &stfs)); 10343 goto convert_statfs; 10344 #endif 10345 #ifdef TARGET_NR_statfs64 10346 case TARGET_NR_statfs64: 10347 if (!(p = lock_user_string(arg1))) { 10348 return -TARGET_EFAULT; 10349 } 10350 ret = get_errno(statfs(path(p), &stfs)); 10351 unlock_user(p, arg1, 0); 10352 convert_statfs64: 10353 if (!is_error(ret)) { 10354 struct target_statfs64 *target_stfs; 10355 10356 if 
(!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 10357 return -TARGET_EFAULT; 10358 __put_user(stfs.f_type, &target_stfs->f_type); 10359 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10360 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10361 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10362 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10363 __put_user(stfs.f_files, &target_stfs->f_files); 10364 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10365 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10366 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10367 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10368 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10369 #ifdef _STATFS_F_FLAGS 10370 __put_user(stfs.f_flags, &target_stfs->f_flags); 10371 #else 10372 __put_user(0, &target_stfs->f_flags); 10373 #endif 10374 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10375 unlock_user_struct(target_stfs, arg3, 1); 10376 } 10377 return ret; 10378 case TARGET_NR_fstatfs64: 10379 ret = get_errno(fstatfs(arg1, &stfs)); 10380 goto convert_statfs64; 10381 #endif 10382 #ifdef TARGET_NR_socketcall 10383 case TARGET_NR_socketcall: 10384 return do_socketcall(arg1, arg2); 10385 #endif 10386 #ifdef TARGET_NR_accept 10387 case TARGET_NR_accept: 10388 return do_accept4(arg1, arg2, arg3, 0); 10389 #endif 10390 #ifdef TARGET_NR_accept4 10391 case TARGET_NR_accept4: 10392 return do_accept4(arg1, arg2, arg3, arg4); 10393 #endif 10394 #ifdef TARGET_NR_bind 10395 case TARGET_NR_bind: 10396 return do_bind(arg1, arg2, arg3); 10397 #endif 10398 #ifdef TARGET_NR_connect 10399 case TARGET_NR_connect: 10400 return do_connect(arg1, arg2, arg3); 10401 #endif 10402 #ifdef TARGET_NR_getpeername 10403 case TARGET_NR_getpeername: 10404 return do_getpeername(arg1, arg2, arg3); 10405 #endif 10406 #ifdef TARGET_NR_getsockname 10407 case TARGET_NR_getsockname: 10408 return do_getsockname(arg1, arg2, arg3); 10409 #endif 10410 #ifdef 
TARGET_NR_getsockopt 10411 case TARGET_NR_getsockopt: 10412 return do_getsockopt(arg1, arg2, arg3, arg4, arg5); 10413 #endif 10414 #ifdef TARGET_NR_listen 10415 case TARGET_NR_listen: 10416 return get_errno(listen(arg1, arg2)); 10417 #endif 10418 #ifdef TARGET_NR_recv 10419 case TARGET_NR_recv: 10420 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 10421 #endif 10422 #ifdef TARGET_NR_recvfrom 10423 case TARGET_NR_recvfrom: 10424 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 10425 #endif 10426 #ifdef TARGET_NR_recvmsg 10427 case TARGET_NR_recvmsg: 10428 return do_sendrecvmsg(arg1, arg2, arg3, 0); 10429 #endif 10430 #ifdef TARGET_NR_send 10431 case TARGET_NR_send: 10432 return do_sendto(arg1, arg2, arg3, arg4, 0, 0); 10433 #endif 10434 #ifdef TARGET_NR_sendmsg 10435 case TARGET_NR_sendmsg: 10436 return do_sendrecvmsg(arg1, arg2, arg3, 1); 10437 #endif 10438 #ifdef TARGET_NR_sendmmsg 10439 case TARGET_NR_sendmmsg: 10440 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 10441 #endif 10442 #ifdef TARGET_NR_recvmmsg 10443 case TARGET_NR_recvmmsg: 10444 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 10445 #endif 10446 #ifdef TARGET_NR_sendto 10447 case TARGET_NR_sendto: 10448 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 10449 #endif 10450 #ifdef TARGET_NR_shutdown 10451 case TARGET_NR_shutdown: 10452 return get_errno(shutdown(arg1, arg2)); 10453 #endif 10454 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 10455 case TARGET_NR_getrandom: 10456 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 10457 if (!p) { 10458 return -TARGET_EFAULT; 10459 } 10460 ret = get_errno(getrandom(p, arg2, arg3)); 10461 unlock_user(p, arg1, ret); 10462 return ret; 10463 #endif 10464 #ifdef TARGET_NR_socket 10465 case TARGET_NR_socket: 10466 return do_socket(arg1, arg2, arg3); 10467 #endif 10468 #ifdef TARGET_NR_socketpair 10469 case TARGET_NR_socketpair: 10470 return do_socketpair(arg1, arg2, arg3, arg4); 10471 #endif 10472 #ifdef TARGET_NR_setsockopt 10473 case 
TARGET_NR_setsockopt: 10474 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 10475 #endif 10476 #if defined(TARGET_NR_syslog) 10477 case TARGET_NR_syslog: 10478 { 10479 int len = arg2; 10480 10481 switch (arg1) { 10482 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 10483 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 10484 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 10485 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 10486 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 10487 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 10488 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 10489 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 10490 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 10491 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 10492 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 10493 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 10494 { 10495 if (len < 0) { 10496 return -TARGET_EINVAL; 10497 } 10498 if (len == 0) { 10499 return 0; 10500 } 10501 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10502 if (!p) { 10503 return -TARGET_EFAULT; 10504 } 10505 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 10506 unlock_user(p, arg2, arg3); 10507 } 10508 return ret; 10509 default: 10510 return -TARGET_EINVAL; 10511 } 10512 } 10513 break; 10514 #endif 10515 case TARGET_NR_setitimer: 10516 { 10517 struct itimerval value, ovalue, *pvalue; 10518 10519 if (arg2) { 10520 pvalue = &value; 10521 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 10522 || copy_from_user_timeval(&pvalue->it_value, 10523 arg2 + sizeof(struct target_timeval))) 10524 return -TARGET_EFAULT; 10525 } else { 10526 pvalue = NULL; 10527 } 10528 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 10529 if (!is_error(ret) && arg3) { 10530 if (copy_to_user_timeval(arg3, 10531 &ovalue.it_interval) 10532 || copy_to_user_timeval(arg3 + sizeof(struct 
target_timeval), 10533 &ovalue.it_value)) 10534 return -TARGET_EFAULT; 10535 } 10536 } 10537 return ret; 10538 case TARGET_NR_getitimer: 10539 { 10540 struct itimerval value; 10541 10542 ret = get_errno(getitimer(arg1, &value)); 10543 if (!is_error(ret) && arg2) { 10544 if (copy_to_user_timeval(arg2, 10545 &value.it_interval) 10546 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 10547 &value.it_value)) 10548 return -TARGET_EFAULT; 10549 } 10550 } 10551 return ret; 10552 #ifdef TARGET_NR_stat 10553 case TARGET_NR_stat: 10554 if (!(p = lock_user_string(arg1))) { 10555 return -TARGET_EFAULT; 10556 } 10557 ret = get_errno(stat(path(p), &st)); 10558 unlock_user(p, arg1, 0); 10559 goto do_stat; 10560 #endif 10561 #ifdef TARGET_NR_lstat 10562 case TARGET_NR_lstat: 10563 if (!(p = lock_user_string(arg1))) { 10564 return -TARGET_EFAULT; 10565 } 10566 ret = get_errno(lstat(path(p), &st)); 10567 unlock_user(p, arg1, 0); 10568 goto do_stat; 10569 #endif 10570 #ifdef TARGET_NR_fstat 10571 case TARGET_NR_fstat: 10572 { 10573 ret = get_errno(fstat(arg1, &st)); 10574 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 10575 do_stat: 10576 #endif 10577 if (!is_error(ret)) { 10578 struct target_stat *target_st; 10579 10580 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 10581 return -TARGET_EFAULT; 10582 memset(target_st, 0, sizeof(*target_st)); 10583 __put_user(st.st_dev, &target_st->st_dev); 10584 __put_user(st.st_ino, &target_st->st_ino); 10585 __put_user(st.st_mode, &target_st->st_mode); 10586 __put_user(st.st_uid, &target_st->st_uid); 10587 __put_user(st.st_gid, &target_st->st_gid); 10588 __put_user(st.st_nlink, &target_st->st_nlink); 10589 __put_user(st.st_rdev, &target_st->st_rdev); 10590 __put_user(st.st_size, &target_st->st_size); 10591 __put_user(st.st_blksize, &target_st->st_blksize); 10592 __put_user(st.st_blocks, &target_st->st_blocks); 10593 __put_user(st.st_atime, &target_st->target_st_atime); 10594 __put_user(st.st_mtime, 
&target_st->target_st_mtime); 10595 __put_user(st.st_ctime, &target_st->target_st_ctime); 10596 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) 10597 __put_user(st.st_atim.tv_nsec, 10598 &target_st->target_st_atime_nsec); 10599 __put_user(st.st_mtim.tv_nsec, 10600 &target_st->target_st_mtime_nsec); 10601 __put_user(st.st_ctim.tv_nsec, 10602 &target_st->target_st_ctime_nsec); 10603 #endif 10604 unlock_user_struct(target_st, arg2, 1); 10605 } 10606 } 10607 return ret; 10608 #endif 10609 case TARGET_NR_vhangup: 10610 return get_errno(vhangup()); 10611 #ifdef TARGET_NR_syscall 10612 case TARGET_NR_syscall: 10613 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 10614 arg6, arg7, arg8, 0); 10615 #endif 10616 #if defined(TARGET_NR_wait4) 10617 case TARGET_NR_wait4: 10618 { 10619 int status; 10620 abi_long status_ptr = arg2; 10621 struct rusage rusage, *rusage_ptr; 10622 abi_ulong target_rusage = arg4; 10623 abi_long rusage_err; 10624 if (target_rusage) 10625 rusage_ptr = &rusage; 10626 else 10627 rusage_ptr = NULL; 10628 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10629 if (!is_error(ret)) { 10630 if (status_ptr && ret) { 10631 status = host_to_target_waitstatus(status); 10632 if (put_user_s32(status, status_ptr)) 10633 return -TARGET_EFAULT; 10634 } 10635 if (target_rusage) { 10636 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10637 if (rusage_err) { 10638 ret = rusage_err; 10639 } 10640 } 10641 } 10642 } 10643 return ret; 10644 #endif 10645 #ifdef TARGET_NR_swapoff 10646 case TARGET_NR_swapoff: 10647 if (!(p = lock_user_string(arg1))) 10648 return -TARGET_EFAULT; 10649 ret = get_errno(swapoff(p)); 10650 unlock_user(p, arg1, 0); 10651 return ret; 10652 #endif 10653 case TARGET_NR_sysinfo: 10654 { 10655 struct target_sysinfo *target_value; 10656 struct sysinfo value; 10657 ret = get_errno(sysinfo(&value)); 10658 if (!is_error(ret) && arg1) 10659 { 10660 if (!lock_user_struct(VERIFY_WRITE, 
target_value, arg1, 0)) 10661 return -TARGET_EFAULT; 10662 __put_user(value.uptime, &target_value->uptime); 10663 __put_user(value.loads[0], &target_value->loads[0]); 10664 __put_user(value.loads[1], &target_value->loads[1]); 10665 __put_user(value.loads[2], &target_value->loads[2]); 10666 __put_user(value.totalram, &target_value->totalram); 10667 __put_user(value.freeram, &target_value->freeram); 10668 __put_user(value.sharedram, &target_value->sharedram); 10669 __put_user(value.bufferram, &target_value->bufferram); 10670 __put_user(value.totalswap, &target_value->totalswap); 10671 __put_user(value.freeswap, &target_value->freeswap); 10672 __put_user(value.procs, &target_value->procs); 10673 __put_user(value.totalhigh, &target_value->totalhigh); 10674 __put_user(value.freehigh, &target_value->freehigh); 10675 __put_user(value.mem_unit, &target_value->mem_unit); 10676 unlock_user_struct(target_value, arg1, 1); 10677 } 10678 } 10679 return ret; 10680 #ifdef TARGET_NR_ipc 10681 case TARGET_NR_ipc: 10682 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10683 #endif 10684 #ifdef TARGET_NR_semget 10685 case TARGET_NR_semget: 10686 return get_errno(semget(arg1, arg2, arg3)); 10687 #endif 10688 #ifdef TARGET_NR_semop 10689 case TARGET_NR_semop: 10690 return do_semtimedop(arg1, arg2, arg3, 0, false); 10691 #endif 10692 #ifdef TARGET_NR_semtimedop 10693 case TARGET_NR_semtimedop: 10694 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10695 #endif 10696 #ifdef TARGET_NR_semtimedop_time64 10697 case TARGET_NR_semtimedop_time64: 10698 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10699 #endif 10700 #ifdef TARGET_NR_semctl 10701 case TARGET_NR_semctl: 10702 return do_semctl(arg1, arg2, arg3, arg4); 10703 #endif 10704 #ifdef TARGET_NR_msgctl 10705 case TARGET_NR_msgctl: 10706 return do_msgctl(arg1, arg2, arg3); 10707 #endif 10708 #ifdef TARGET_NR_msgget 10709 case TARGET_NR_msgget: 10710 return get_errno(msgget(arg1, arg2)); 10711 #endif 10712 #ifdef 
TARGET_NR_msgrcv 10713 case TARGET_NR_msgrcv: 10714 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10715 #endif 10716 #ifdef TARGET_NR_msgsnd 10717 case TARGET_NR_msgsnd: 10718 return do_msgsnd(arg1, arg2, arg3, arg4); 10719 #endif 10720 #ifdef TARGET_NR_shmget 10721 case TARGET_NR_shmget: 10722 return get_errno(shmget(arg1, arg2, arg3)); 10723 #endif 10724 #ifdef TARGET_NR_shmctl 10725 case TARGET_NR_shmctl: 10726 return do_shmctl(arg1, arg2, arg3); 10727 #endif 10728 #ifdef TARGET_NR_shmat 10729 case TARGET_NR_shmat: 10730 return do_shmat(cpu_env, arg1, arg2, arg3); 10731 #endif 10732 #ifdef TARGET_NR_shmdt 10733 case TARGET_NR_shmdt: 10734 return do_shmdt(arg1); 10735 #endif 10736 case TARGET_NR_fsync: 10737 return get_errno(fsync(arg1)); 10738 case TARGET_NR_clone: 10739 /* Linux manages to have three different orderings for its 10740 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10741 * match the kernel's CONFIG_CLONE_* settings. 10742 * Microblaze is further special in that it uses a sixth 10743 * implicit argument to clone for the TLS pointer. 
10744 */ 10745 #if defined(TARGET_MICROBLAZE) 10746 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10747 #elif defined(TARGET_CLONE_BACKWARDS) 10748 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10749 #elif defined(TARGET_CLONE_BACKWARDS2) 10750 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10751 #else 10752 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10753 #endif 10754 return ret; 10755 #ifdef __NR_exit_group 10756 /* new thread calls */ 10757 case TARGET_NR_exit_group: 10758 preexit_cleanup(cpu_env, arg1); 10759 return get_errno(exit_group(arg1)); 10760 #endif 10761 case TARGET_NR_setdomainname: 10762 if (!(p = lock_user_string(arg1))) 10763 return -TARGET_EFAULT; 10764 ret = get_errno(setdomainname(p, arg2)); 10765 unlock_user(p, arg1, 0); 10766 return ret; 10767 case TARGET_NR_uname: 10768 /* no need to transcode because we use the linux syscall */ 10769 { 10770 struct new_utsname * buf; 10771 10772 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10773 return -TARGET_EFAULT; 10774 ret = get_errno(sys_uname(buf)); 10775 if (!is_error(ret)) { 10776 /* Overwrite the native machine name with whatever is being 10777 emulated. */ 10778 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10779 sizeof(buf->machine)); 10780 /* Allow the user to override the reported release. 
*/ 10781 if (qemu_uname_release && *qemu_uname_release) { 10782 g_strlcpy(buf->release, qemu_uname_release, 10783 sizeof(buf->release)); 10784 } 10785 } 10786 unlock_user_struct(buf, arg1, 1); 10787 } 10788 return ret; 10789 #ifdef TARGET_I386 10790 case TARGET_NR_modify_ldt: 10791 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10792 #if !defined(TARGET_X86_64) 10793 case TARGET_NR_vm86: 10794 return do_vm86(cpu_env, arg1, arg2); 10795 #endif 10796 #endif 10797 #if defined(TARGET_NR_adjtimex) 10798 case TARGET_NR_adjtimex: 10799 { 10800 struct timex host_buf; 10801 10802 if (target_to_host_timex(&host_buf, arg1) != 0) { 10803 return -TARGET_EFAULT; 10804 } 10805 ret = get_errno(adjtimex(&host_buf)); 10806 if (!is_error(ret)) { 10807 if (host_to_target_timex(arg1, &host_buf) != 0) { 10808 return -TARGET_EFAULT; 10809 } 10810 } 10811 } 10812 return ret; 10813 #endif 10814 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10815 case TARGET_NR_clock_adjtime: 10816 { 10817 struct timex htx, *phtx = &htx; 10818 10819 if (target_to_host_timex(phtx, arg2) != 0) { 10820 return -TARGET_EFAULT; 10821 } 10822 ret = get_errno(clock_adjtime(arg1, phtx)); 10823 if (!is_error(ret) && phtx) { 10824 if (host_to_target_timex(arg2, phtx) != 0) { 10825 return -TARGET_EFAULT; 10826 } 10827 } 10828 } 10829 return ret; 10830 #endif 10831 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10832 case TARGET_NR_clock_adjtime64: 10833 { 10834 struct timex htx; 10835 10836 if (target_to_host_timex64(&htx, arg2) != 0) { 10837 return -TARGET_EFAULT; 10838 } 10839 ret = get_errno(clock_adjtime(arg1, &htx)); 10840 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10841 return -TARGET_EFAULT; 10842 } 10843 } 10844 return ret; 10845 #endif 10846 case TARGET_NR_getpgid: 10847 return get_errno(getpgid(arg1)); 10848 case TARGET_NR_fchdir: 10849 return get_errno(fchdir(arg1)); 10850 case TARGET_NR_personality: 10851 return 
get_errno(personality(arg1)); 10852 #ifdef TARGET_NR__llseek /* Not on alpha */ 10853 case TARGET_NR__llseek: 10854 { 10855 int64_t res; 10856 #if !defined(__NR_llseek) 10857 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10858 if (res == -1) { 10859 ret = get_errno(res); 10860 } else { 10861 ret = 0; 10862 } 10863 #else 10864 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10865 #endif 10866 if ((ret == 0) && put_user_s64(res, arg4)) { 10867 return -TARGET_EFAULT; 10868 } 10869 } 10870 return ret; 10871 #endif 10872 #ifdef TARGET_NR_getdents 10873 case TARGET_NR_getdents: 10874 return do_getdents(arg1, arg2, arg3); 10875 #endif /* TARGET_NR_getdents */ 10876 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10877 case TARGET_NR_getdents64: 10878 return do_getdents64(arg1, arg2, arg3); 10879 #endif /* TARGET_NR_getdents64 */ 10880 #if defined(TARGET_NR__newselect) 10881 case TARGET_NR__newselect: 10882 return do_select(arg1, arg2, arg3, arg4, arg5); 10883 #endif 10884 #ifdef TARGET_NR_poll 10885 case TARGET_NR_poll: 10886 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 10887 #endif 10888 #ifdef TARGET_NR_ppoll 10889 case TARGET_NR_ppoll: 10890 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 10891 #endif 10892 #ifdef TARGET_NR_ppoll_time64 10893 case TARGET_NR_ppoll_time64: 10894 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 10895 #endif 10896 case TARGET_NR_flock: 10897 /* NOTE: the flock constant seems to be the same for every 10898 Linux platform */ 10899 return get_errno(safe_flock(arg1, arg2)); 10900 case TARGET_NR_readv: 10901 { 10902 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10903 if (vec != NULL) { 10904 ret = get_errno(safe_readv(arg1, vec, arg3)); 10905 unlock_iovec(vec, arg2, arg3, 1); 10906 } else { 10907 ret = -host_to_target_errno(errno); 10908 } 10909 } 10910 return ret; 10911 case TARGET_NR_writev: 10912 { 10913 struct iovec *vec = lock_iovec(VERIFY_READ, 
arg2, arg3, 1); 10914 if (vec != NULL) { 10915 ret = get_errno(safe_writev(arg1, vec, arg3)); 10916 unlock_iovec(vec, arg2, arg3, 0); 10917 } else { 10918 ret = -host_to_target_errno(errno); 10919 } 10920 } 10921 return ret; 10922 #if defined(TARGET_NR_preadv) 10923 case TARGET_NR_preadv: 10924 { 10925 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10926 if (vec != NULL) { 10927 unsigned long low, high; 10928 10929 target_to_host_low_high(arg4, arg5, &low, &high); 10930 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10931 unlock_iovec(vec, arg2, arg3, 1); 10932 } else { 10933 ret = -host_to_target_errno(errno); 10934 } 10935 } 10936 return ret; 10937 #endif 10938 #if defined(TARGET_NR_pwritev) 10939 case TARGET_NR_pwritev: 10940 { 10941 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10942 if (vec != NULL) { 10943 unsigned long low, high; 10944 10945 target_to_host_low_high(arg4, arg5, &low, &high); 10946 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10947 unlock_iovec(vec, arg2, arg3, 0); 10948 } else { 10949 ret = -host_to_target_errno(errno); 10950 } 10951 } 10952 return ret; 10953 #endif 10954 case TARGET_NR_getsid: 10955 return get_errno(getsid(arg1)); 10956 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10957 case TARGET_NR_fdatasync: 10958 return get_errno(fdatasync(arg1)); 10959 #endif 10960 case TARGET_NR_sched_getaffinity: 10961 { 10962 unsigned int mask_size; 10963 unsigned long *mask; 10964 10965 /* 10966 * sched_getaffinity needs multiples of ulong, so need to take 10967 * care of mismatches between target ulong and host ulong sizes. 
10968 */ 10969 if (arg2 & (sizeof(abi_ulong) - 1)) { 10970 return -TARGET_EINVAL; 10971 } 10972 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10973 10974 mask = alloca(mask_size); 10975 memset(mask, 0, mask_size); 10976 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10977 10978 if (!is_error(ret)) { 10979 if (ret > arg2) { 10980 /* More data returned than the caller's buffer will fit. 10981 * This only happens if sizeof(abi_long) < sizeof(long) 10982 * and the caller passed us a buffer holding an odd number 10983 * of abi_longs. If the host kernel is actually using the 10984 * extra 4 bytes then fail EINVAL; otherwise we can just 10985 * ignore them and only copy the interesting part. 10986 */ 10987 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10988 if (numcpus > arg2 * 8) { 10989 return -TARGET_EINVAL; 10990 } 10991 ret = arg2; 10992 } 10993 10994 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10995 return -TARGET_EFAULT; 10996 } 10997 } 10998 } 10999 return ret; 11000 case TARGET_NR_sched_setaffinity: 11001 { 11002 unsigned int mask_size; 11003 unsigned long *mask; 11004 11005 /* 11006 * sched_setaffinity needs multiples of ulong, so need to take 11007 * care of mismatches between target ulong and host ulong sizes. 11008 */ 11009 if (arg2 & (sizeof(abi_ulong) - 1)) { 11010 return -TARGET_EINVAL; 11011 } 11012 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 11013 mask = alloca(mask_size); 11014 11015 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 11016 if (ret) { 11017 return ret; 11018 } 11019 11020 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 11021 } 11022 case TARGET_NR_getcpu: 11023 { 11024 unsigned cpu, node; 11025 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 11026 arg2 ? 
&node : NULL, 11027 NULL)); 11028 if (is_error(ret)) { 11029 return ret; 11030 } 11031 if (arg1 && put_user_u32(cpu, arg1)) { 11032 return -TARGET_EFAULT; 11033 } 11034 if (arg2 && put_user_u32(node, arg2)) { 11035 return -TARGET_EFAULT; 11036 } 11037 } 11038 return ret; 11039 case TARGET_NR_sched_setparam: 11040 { 11041 struct target_sched_param *target_schp; 11042 struct sched_param schp; 11043 11044 if (arg2 == 0) { 11045 return -TARGET_EINVAL; 11046 } 11047 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) { 11048 return -TARGET_EFAULT; 11049 } 11050 schp.sched_priority = tswap32(target_schp->sched_priority); 11051 unlock_user_struct(target_schp, arg2, 0); 11052 return get_errno(sys_sched_setparam(arg1, &schp)); 11053 } 11054 case TARGET_NR_sched_getparam: 11055 { 11056 struct target_sched_param *target_schp; 11057 struct sched_param schp; 11058 11059 if (arg2 == 0) { 11060 return -TARGET_EINVAL; 11061 } 11062 ret = get_errno(sys_sched_getparam(arg1, &schp)); 11063 if (!is_error(ret)) { 11064 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) { 11065 return -TARGET_EFAULT; 11066 } 11067 target_schp->sched_priority = tswap32(schp.sched_priority); 11068 unlock_user_struct(target_schp, arg2, 1); 11069 } 11070 } 11071 return ret; 11072 case TARGET_NR_sched_setscheduler: 11073 { 11074 struct target_sched_param *target_schp; 11075 struct sched_param schp; 11076 if (arg3 == 0) { 11077 return -TARGET_EINVAL; 11078 } 11079 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) { 11080 return -TARGET_EFAULT; 11081 } 11082 schp.sched_priority = tswap32(target_schp->sched_priority); 11083 unlock_user_struct(target_schp, arg3, 0); 11084 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp)); 11085 } 11086 case TARGET_NR_sched_getscheduler: 11087 return get_errno(sys_sched_getscheduler(arg1)); 11088 case TARGET_NR_sched_getattr: 11089 { 11090 struct target_sched_attr *target_scha; 11091 struct sched_attr scha; 11092 if (arg2 == 0) { 11093 return 
-TARGET_EINVAL; 11094 } 11095 if (arg3 > sizeof(scha)) { 11096 arg3 = sizeof(scha); 11097 } 11098 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4)); 11099 if (!is_error(ret)) { 11100 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11101 if (!target_scha) { 11102 return -TARGET_EFAULT; 11103 } 11104 target_scha->size = tswap32(scha.size); 11105 target_scha->sched_policy = tswap32(scha.sched_policy); 11106 target_scha->sched_flags = tswap64(scha.sched_flags); 11107 target_scha->sched_nice = tswap32(scha.sched_nice); 11108 target_scha->sched_priority = tswap32(scha.sched_priority); 11109 target_scha->sched_runtime = tswap64(scha.sched_runtime); 11110 target_scha->sched_deadline = tswap64(scha.sched_deadline); 11111 target_scha->sched_period = tswap64(scha.sched_period); 11112 if (scha.size > offsetof(struct sched_attr, sched_util_min)) { 11113 target_scha->sched_util_min = tswap32(scha.sched_util_min); 11114 target_scha->sched_util_max = tswap32(scha.sched_util_max); 11115 } 11116 unlock_user(target_scha, arg2, arg3); 11117 } 11118 return ret; 11119 } 11120 case TARGET_NR_sched_setattr: 11121 { 11122 struct target_sched_attr *target_scha; 11123 struct sched_attr scha; 11124 uint32_t size; 11125 int zeroed; 11126 if (arg2 == 0) { 11127 return -TARGET_EINVAL; 11128 } 11129 if (get_user_u32(size, arg2)) { 11130 return -TARGET_EFAULT; 11131 } 11132 if (!size) { 11133 size = offsetof(struct target_sched_attr, sched_util_min); 11134 } 11135 if (size < offsetof(struct target_sched_attr, sched_util_min)) { 11136 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 11137 return -TARGET_EFAULT; 11138 } 11139 return -TARGET_E2BIG; 11140 } 11141 11142 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size); 11143 if (zeroed < 0) { 11144 return zeroed; 11145 } else if (zeroed == 0) { 11146 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 11147 return -TARGET_EFAULT; 11148 } 11149 return -TARGET_E2BIG; 11150 } 11151 if (size > 
sizeof(struct target_sched_attr)) { 11152 size = sizeof(struct target_sched_attr); 11153 } 11154 11155 target_scha = lock_user(VERIFY_READ, arg2, size, 1); 11156 if (!target_scha) { 11157 return -TARGET_EFAULT; 11158 } 11159 scha.size = size; 11160 scha.sched_policy = tswap32(target_scha->sched_policy); 11161 scha.sched_flags = tswap64(target_scha->sched_flags); 11162 scha.sched_nice = tswap32(target_scha->sched_nice); 11163 scha.sched_priority = tswap32(target_scha->sched_priority); 11164 scha.sched_runtime = tswap64(target_scha->sched_runtime); 11165 scha.sched_deadline = tswap64(target_scha->sched_deadline); 11166 scha.sched_period = tswap64(target_scha->sched_period); 11167 if (size > offsetof(struct target_sched_attr, sched_util_min)) { 11168 scha.sched_util_min = tswap32(target_scha->sched_util_min); 11169 scha.sched_util_max = tswap32(target_scha->sched_util_max); 11170 } 11171 unlock_user(target_scha, arg2, 0); 11172 return get_errno(sys_sched_setattr(arg1, &scha, arg3)); 11173 } 11174 case TARGET_NR_sched_yield: 11175 return get_errno(sched_yield()); 11176 case TARGET_NR_sched_get_priority_max: 11177 return get_errno(sched_get_priority_max(arg1)); 11178 case TARGET_NR_sched_get_priority_min: 11179 return get_errno(sched_get_priority_min(arg1)); 11180 #ifdef TARGET_NR_sched_rr_get_interval 11181 case TARGET_NR_sched_rr_get_interval: 11182 { 11183 struct timespec ts; 11184 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 11185 if (!is_error(ret)) { 11186 ret = host_to_target_timespec(arg2, &ts); 11187 } 11188 } 11189 return ret; 11190 #endif 11191 #ifdef TARGET_NR_sched_rr_get_interval_time64 11192 case TARGET_NR_sched_rr_get_interval_time64: 11193 { 11194 struct timespec ts; 11195 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 11196 if (!is_error(ret)) { 11197 ret = host_to_target_timespec64(arg2, &ts); 11198 } 11199 } 11200 return ret; 11201 #endif 11202 #if defined(TARGET_NR_nanosleep) 11203 case TARGET_NR_nanosleep: 11204 { 11205 struct timespec 
req, rem; 11206 target_to_host_timespec(&req, arg1); 11207 ret = get_errno(safe_nanosleep(&req, &rem)); 11208 if (is_error(ret) && arg2) { 11209 host_to_target_timespec(arg2, &rem); 11210 } 11211 } 11212 return ret; 11213 #endif 11214 case TARGET_NR_prctl: 11215 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5); 11216 break; 11217 #ifdef TARGET_NR_arch_prctl 11218 case TARGET_NR_arch_prctl: 11219 return do_arch_prctl(cpu_env, arg1, arg2); 11220 #endif 11221 #ifdef TARGET_NR_pread64 11222 case TARGET_NR_pread64: 11223 if (regpairs_aligned(cpu_env, num)) { 11224 arg4 = arg5; 11225 arg5 = arg6; 11226 } 11227 if (arg2 == 0 && arg3 == 0) { 11228 /* Special-case NULL buffer and zero length, which should succeed */ 11229 p = 0; 11230 } else { 11231 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11232 if (!p) { 11233 return -TARGET_EFAULT; 11234 } 11235 } 11236 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 11237 unlock_user(p, arg2, ret); 11238 return ret; 11239 case TARGET_NR_pwrite64: 11240 if (regpairs_aligned(cpu_env, num)) { 11241 arg4 = arg5; 11242 arg5 = arg6; 11243 } 11244 if (arg2 == 0 && arg3 == 0) { 11245 /* Special-case NULL buffer and zero length, which should succeed */ 11246 p = 0; 11247 } else { 11248 p = lock_user(VERIFY_READ, arg2, arg3, 1); 11249 if (!p) { 11250 return -TARGET_EFAULT; 11251 } 11252 } 11253 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 11254 unlock_user(p, arg2, 0); 11255 return ret; 11256 #endif 11257 case TARGET_NR_getcwd: 11258 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 11259 return -TARGET_EFAULT; 11260 ret = get_errno(sys_getcwd1(p, arg2)); 11261 unlock_user(p, arg1, ret); 11262 return ret; 11263 case TARGET_NR_capget: 11264 case TARGET_NR_capset: 11265 { 11266 struct target_user_cap_header *target_header; 11267 struct target_user_cap_data *target_data = NULL; 11268 struct __user_cap_header_struct header; 11269 struct __user_cap_data_struct data[2]; 11270 struct 
__user_cap_data_struct *dataptr = NULL; 11271 int i, target_datalen; 11272 int data_items = 1; 11273 11274 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 11275 return -TARGET_EFAULT; 11276 } 11277 header.version = tswap32(target_header->version); 11278 header.pid = tswap32(target_header->pid); 11279 11280 if (header.version != _LINUX_CAPABILITY_VERSION) { 11281 /* Version 2 and up takes pointer to two user_data structs */ 11282 data_items = 2; 11283 } 11284 11285 target_datalen = sizeof(*target_data) * data_items; 11286 11287 if (arg2) { 11288 if (num == TARGET_NR_capget) { 11289 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 11290 } else { 11291 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 11292 } 11293 if (!target_data) { 11294 unlock_user_struct(target_header, arg1, 0); 11295 return -TARGET_EFAULT; 11296 } 11297 11298 if (num == TARGET_NR_capset) { 11299 for (i = 0; i < data_items; i++) { 11300 data[i].effective = tswap32(target_data[i].effective); 11301 data[i].permitted = tswap32(target_data[i].permitted); 11302 data[i].inheritable = tswap32(target_data[i].inheritable); 11303 } 11304 } 11305 11306 dataptr = data; 11307 } 11308 11309 if (num == TARGET_NR_capget) { 11310 ret = get_errno(capget(&header, dataptr)); 11311 } else { 11312 ret = get_errno(capset(&header, dataptr)); 11313 } 11314 11315 /* The kernel always updates version for both capget and capset */ 11316 target_header->version = tswap32(header.version); 11317 unlock_user_struct(target_header, arg1, 1); 11318 11319 if (arg2) { 11320 if (num == TARGET_NR_capget) { 11321 for (i = 0; i < data_items; i++) { 11322 target_data[i].effective = tswap32(data[i].effective); 11323 target_data[i].permitted = tswap32(data[i].permitted); 11324 target_data[i].inheritable = tswap32(data[i].inheritable); 11325 } 11326 unlock_user(target_data, arg2, target_datalen); 11327 } else { 11328 unlock_user(target_data, arg2, 0); 11329 } 11330 } 11331 return ret; 11332 } 11333 
case TARGET_NR_sigaltstack: 11334 return do_sigaltstack(arg1, arg2, cpu_env); 11335 11336 #ifdef CONFIG_SENDFILE 11337 #ifdef TARGET_NR_sendfile 11338 case TARGET_NR_sendfile: 11339 { 11340 off_t *offp = NULL; 11341 off_t off; 11342 if (arg3) { 11343 ret = get_user_sal(off, arg3); 11344 if (is_error(ret)) { 11345 return ret; 11346 } 11347 offp = &off; 11348 } 11349 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11350 if (!is_error(ret) && arg3) { 11351 abi_long ret2 = put_user_sal(off, arg3); 11352 if (is_error(ret2)) { 11353 ret = ret2; 11354 } 11355 } 11356 return ret; 11357 } 11358 #endif 11359 #ifdef TARGET_NR_sendfile64 11360 case TARGET_NR_sendfile64: 11361 { 11362 off_t *offp = NULL; 11363 off_t off; 11364 if (arg3) { 11365 ret = get_user_s64(off, arg3); 11366 if (is_error(ret)) { 11367 return ret; 11368 } 11369 offp = &off; 11370 } 11371 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11372 if (!is_error(ret) && arg3) { 11373 abi_long ret2 = put_user_s64(off, arg3); 11374 if (is_error(ret2)) { 11375 ret = ret2; 11376 } 11377 } 11378 return ret; 11379 } 11380 #endif 11381 #endif 11382 #ifdef TARGET_NR_vfork 11383 case TARGET_NR_vfork: 11384 return get_errno(do_fork(cpu_env, 11385 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11386 0, 0, 0, 0)); 11387 #endif 11388 #ifdef TARGET_NR_ugetrlimit 11389 case TARGET_NR_ugetrlimit: 11390 { 11391 struct rlimit rlim; 11392 int resource = target_to_host_resource(arg1); 11393 ret = get_errno(getrlimit(resource, &rlim)); 11394 if (!is_error(ret)) { 11395 struct target_rlimit *target_rlim; 11396 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11397 return -TARGET_EFAULT; 11398 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11399 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11400 unlock_user_struct(target_rlim, arg2, 1); 11401 } 11402 return ret; 11403 } 11404 #endif 11405 #ifdef TARGET_NR_truncate64 11406 case TARGET_NR_truncate64: 11407 if (!(p = lock_user_string(arg1))) 11408 
return -TARGET_EFAULT; 11409 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11410 unlock_user(p, arg1, 0); 11411 return ret; 11412 #endif 11413 #ifdef TARGET_NR_ftruncate64 11414 case TARGET_NR_ftruncate64: 11415 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11416 #endif 11417 #ifdef TARGET_NR_stat64 11418 case TARGET_NR_stat64: 11419 if (!(p = lock_user_string(arg1))) { 11420 return -TARGET_EFAULT; 11421 } 11422 ret = get_errno(stat(path(p), &st)); 11423 unlock_user(p, arg1, 0); 11424 if (!is_error(ret)) 11425 ret = host_to_target_stat64(cpu_env, arg2, &st); 11426 return ret; 11427 #endif 11428 #ifdef TARGET_NR_lstat64 11429 case TARGET_NR_lstat64: 11430 if (!(p = lock_user_string(arg1))) { 11431 return -TARGET_EFAULT; 11432 } 11433 ret = get_errno(lstat(path(p), &st)); 11434 unlock_user(p, arg1, 0); 11435 if (!is_error(ret)) 11436 ret = host_to_target_stat64(cpu_env, arg2, &st); 11437 return ret; 11438 #endif 11439 #ifdef TARGET_NR_fstat64 11440 case TARGET_NR_fstat64: 11441 ret = get_errno(fstat(arg1, &st)); 11442 if (!is_error(ret)) 11443 ret = host_to_target_stat64(cpu_env, arg2, &st); 11444 return ret; 11445 #endif 11446 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11447 #ifdef TARGET_NR_fstatat64 11448 case TARGET_NR_fstatat64: 11449 #endif 11450 #ifdef TARGET_NR_newfstatat 11451 case TARGET_NR_newfstatat: 11452 #endif 11453 if (!(p = lock_user_string(arg2))) { 11454 return -TARGET_EFAULT; 11455 } 11456 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11457 unlock_user(p, arg2, 0); 11458 if (!is_error(ret)) 11459 ret = host_to_target_stat64(cpu_env, arg3, &st); 11460 return ret; 11461 #endif 11462 #if defined(TARGET_NR_statx) 11463 case TARGET_NR_statx: 11464 { 11465 struct target_statx *target_stx; 11466 int dirfd = arg1; 11467 int flags = arg3; 11468 11469 p = lock_user_string(arg2); 11470 if (p == NULL) { 11471 return -TARGET_EFAULT; 11472 } 11473 #if defined(__NR_statx) 11474 { 11475 /* 11476 * It is 
assumed that struct statx is architecture independent. 11477 */ 11478 struct target_statx host_stx; 11479 int mask = arg4; 11480 11481 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11482 if (!is_error(ret)) { 11483 if (host_to_target_statx(&host_stx, arg5) != 0) { 11484 unlock_user(p, arg2, 0); 11485 return -TARGET_EFAULT; 11486 } 11487 } 11488 11489 if (ret != -TARGET_ENOSYS) { 11490 unlock_user(p, arg2, 0); 11491 return ret; 11492 } 11493 } 11494 #endif 11495 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11496 unlock_user(p, arg2, 0); 11497 11498 if (!is_error(ret)) { 11499 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11500 return -TARGET_EFAULT; 11501 } 11502 memset(target_stx, 0, sizeof(*target_stx)); 11503 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11504 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11505 __put_user(st.st_ino, &target_stx->stx_ino); 11506 __put_user(st.st_mode, &target_stx->stx_mode); 11507 __put_user(st.st_uid, &target_stx->stx_uid); 11508 __put_user(st.st_gid, &target_stx->stx_gid); 11509 __put_user(st.st_nlink, &target_stx->stx_nlink); 11510 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11511 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11512 __put_user(st.st_size, &target_stx->stx_size); 11513 __put_user(st.st_blksize, &target_stx->stx_blksize); 11514 __put_user(st.st_blocks, &target_stx->stx_blocks); 11515 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11516 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11517 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11518 unlock_user_struct(target_stx, arg5, 1); 11519 } 11520 } 11521 return ret; 11522 #endif 11523 #ifdef TARGET_NR_lchown 11524 case TARGET_NR_lchown: 11525 if (!(p = lock_user_string(arg1))) 11526 return -TARGET_EFAULT; 11527 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11528 unlock_user(p, arg1, 0); 11529 return ret; 11530 #endif 11531 #ifdef 
TARGET_NR_getuid 11532 case TARGET_NR_getuid: 11533 return get_errno(high2lowuid(getuid())); 11534 #endif 11535 #ifdef TARGET_NR_getgid 11536 case TARGET_NR_getgid: 11537 return get_errno(high2lowgid(getgid())); 11538 #endif 11539 #ifdef TARGET_NR_geteuid 11540 case TARGET_NR_geteuid: 11541 return get_errno(high2lowuid(geteuid())); 11542 #endif 11543 #ifdef TARGET_NR_getegid 11544 case TARGET_NR_getegid: 11545 return get_errno(high2lowgid(getegid())); 11546 #endif 11547 case TARGET_NR_setreuid: 11548 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11549 case TARGET_NR_setregid: 11550 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11551 case TARGET_NR_getgroups: 11552 { 11553 int gidsetsize = arg1; 11554 target_id *target_grouplist; 11555 gid_t *grouplist; 11556 int i; 11557 11558 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11559 ret = get_errno(getgroups(gidsetsize, grouplist)); 11560 if (gidsetsize == 0) 11561 return ret; 11562 if (!is_error(ret)) { 11563 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11564 if (!target_grouplist) 11565 return -TARGET_EFAULT; 11566 for(i = 0;i < ret; i++) 11567 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11568 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11569 } 11570 } 11571 return ret; 11572 case TARGET_NR_setgroups: 11573 { 11574 int gidsetsize = arg1; 11575 target_id *target_grouplist; 11576 gid_t *grouplist = NULL; 11577 int i; 11578 if (gidsetsize) { 11579 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11580 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11581 if (!target_grouplist) { 11582 return -TARGET_EFAULT; 11583 } 11584 for (i = 0; i < gidsetsize; i++) { 11585 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11586 } 11587 unlock_user(target_grouplist, arg2, 0); 11588 } 11589 return get_errno(setgroups(gidsetsize, grouplist)); 11590 } 11591 case 
TARGET_NR_fchown: 11592 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11593 #if defined(TARGET_NR_fchownat) 11594 case TARGET_NR_fchownat: 11595 if (!(p = lock_user_string(arg2))) 11596 return -TARGET_EFAULT; 11597 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11598 low2highgid(arg4), arg5)); 11599 unlock_user(p, arg2, 0); 11600 return ret; 11601 #endif 11602 #ifdef TARGET_NR_setresuid 11603 case TARGET_NR_setresuid: 11604 return get_errno(sys_setresuid(low2highuid(arg1), 11605 low2highuid(arg2), 11606 low2highuid(arg3))); 11607 #endif 11608 #ifdef TARGET_NR_getresuid 11609 case TARGET_NR_getresuid: 11610 { 11611 uid_t ruid, euid, suid; 11612 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11613 if (!is_error(ret)) { 11614 if (put_user_id(high2lowuid(ruid), arg1) 11615 || put_user_id(high2lowuid(euid), arg2) 11616 || put_user_id(high2lowuid(suid), arg3)) 11617 return -TARGET_EFAULT; 11618 } 11619 } 11620 return ret; 11621 #endif 11622 #ifdef TARGET_NR_getresgid 11623 case TARGET_NR_setresgid: 11624 return get_errno(sys_setresgid(low2highgid(arg1), 11625 low2highgid(arg2), 11626 low2highgid(arg3))); 11627 #endif 11628 #ifdef TARGET_NR_getresgid 11629 case TARGET_NR_getresgid: 11630 { 11631 gid_t rgid, egid, sgid; 11632 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11633 if (!is_error(ret)) { 11634 if (put_user_id(high2lowgid(rgid), arg1) 11635 || put_user_id(high2lowgid(egid), arg2) 11636 || put_user_id(high2lowgid(sgid), arg3)) 11637 return -TARGET_EFAULT; 11638 } 11639 } 11640 return ret; 11641 #endif 11642 #ifdef TARGET_NR_chown 11643 case TARGET_NR_chown: 11644 if (!(p = lock_user_string(arg1))) 11645 return -TARGET_EFAULT; 11646 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11647 unlock_user(p, arg1, 0); 11648 return ret; 11649 #endif 11650 case TARGET_NR_setuid: 11651 return get_errno(sys_setuid(low2highuid(arg1))); 11652 case TARGET_NR_setgid: 11653 return get_errno(sys_setgid(low2highgid(arg1))); 11654 
case TARGET_NR_setfsuid: 11655 return get_errno(setfsuid(arg1)); 11656 case TARGET_NR_setfsgid: 11657 return get_errno(setfsgid(arg1)); 11658 11659 #ifdef TARGET_NR_lchown32 11660 case TARGET_NR_lchown32: 11661 if (!(p = lock_user_string(arg1))) 11662 return -TARGET_EFAULT; 11663 ret = get_errno(lchown(p, arg2, arg3)); 11664 unlock_user(p, arg1, 0); 11665 return ret; 11666 #endif 11667 #ifdef TARGET_NR_getuid32 11668 case TARGET_NR_getuid32: 11669 return get_errno(getuid()); 11670 #endif 11671 11672 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11673 /* Alpha specific */ 11674 case TARGET_NR_getxuid: 11675 { 11676 uid_t euid; 11677 euid=geteuid(); 11678 cpu_env->ir[IR_A4]=euid; 11679 } 11680 return get_errno(getuid()); 11681 #endif 11682 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11683 /* Alpha specific */ 11684 case TARGET_NR_getxgid: 11685 { 11686 uid_t egid; 11687 egid=getegid(); 11688 cpu_env->ir[IR_A4]=egid; 11689 } 11690 return get_errno(getgid()); 11691 #endif 11692 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11693 /* Alpha specific */ 11694 case TARGET_NR_osf_getsysinfo: 11695 ret = -TARGET_EOPNOTSUPP; 11696 switch (arg1) { 11697 case TARGET_GSI_IEEE_FP_CONTROL: 11698 { 11699 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); 11700 uint64_t swcr = cpu_env->swcr; 11701 11702 swcr &= ~SWCR_STATUS_MASK; 11703 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; 11704 11705 if (put_user_u64 (swcr, arg2)) 11706 return -TARGET_EFAULT; 11707 ret = 0; 11708 } 11709 break; 11710 11711 /* case GSI_IEEE_STATE_AT_SIGNAL: 11712 -- Not implemented in linux kernel. 11713 case GSI_UACPROC: 11714 -- Retrieves current unaligned access state; not much used. 11715 case GSI_PROC_TYPE: 11716 -- Retrieves implver information; surely not used. 11717 case GSI_GET_HWRPB: 11718 -- Grabs a copy of the HWRPB; surely not used. 
11719 */ 11720 } 11721 return ret; 11722 #endif 11723 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11724 /* Alpha specific */ 11725 case TARGET_NR_osf_setsysinfo: 11726 ret = -TARGET_EOPNOTSUPP; 11727 switch (arg1) { 11728 case TARGET_SSI_IEEE_FP_CONTROL: 11729 { 11730 uint64_t swcr, fpcr; 11731 11732 if (get_user_u64 (swcr, arg2)) { 11733 return -TARGET_EFAULT; 11734 } 11735 11736 /* 11737 * The kernel calls swcr_update_status to update the 11738 * status bits from the fpcr at every point that it 11739 * could be queried. Therefore, we store the status 11740 * bits only in FPCR. 11741 */ 11742 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); 11743 11744 fpcr = cpu_alpha_load_fpcr(cpu_env); 11745 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); 11746 fpcr |= alpha_ieee_swcr_to_fpcr(swcr); 11747 cpu_alpha_store_fpcr(cpu_env, fpcr); 11748 ret = 0; 11749 } 11750 break; 11751 11752 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11753 { 11754 uint64_t exc, fpcr, fex; 11755 11756 if (get_user_u64(exc, arg2)) { 11757 return -TARGET_EFAULT; 11758 } 11759 exc &= SWCR_STATUS_MASK; 11760 fpcr = cpu_alpha_load_fpcr(cpu_env); 11761 11762 /* Old exceptions are not signaled. */ 11763 fex = alpha_ieee_fpcr_to_swcr(fpcr); 11764 fex = exc & ~fex; 11765 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; 11766 fex &= (cpu_env)->swcr; 11767 11768 /* Update the hardware fpcr. 
*/ 11769 fpcr |= alpha_ieee_swcr_to_fpcr(exc); 11770 cpu_alpha_store_fpcr(cpu_env, fpcr); 11771 11772 if (fex) { 11773 int si_code = TARGET_FPE_FLTUNK; 11774 target_siginfo_t info; 11775 11776 if (fex & SWCR_TRAP_ENABLE_DNO) { 11777 si_code = TARGET_FPE_FLTUND; 11778 } 11779 if (fex & SWCR_TRAP_ENABLE_INE) { 11780 si_code = TARGET_FPE_FLTRES; 11781 } 11782 if (fex & SWCR_TRAP_ENABLE_UNF) { 11783 si_code = TARGET_FPE_FLTUND; 11784 } 11785 if (fex & SWCR_TRAP_ENABLE_OVF) { 11786 si_code = TARGET_FPE_FLTOVF; 11787 } 11788 if (fex & SWCR_TRAP_ENABLE_DZE) { 11789 si_code = TARGET_FPE_FLTDIV; 11790 } 11791 if (fex & SWCR_TRAP_ENABLE_INV) { 11792 si_code = TARGET_FPE_FLTINV; 11793 } 11794 11795 info.si_signo = SIGFPE; 11796 info.si_errno = 0; 11797 info.si_code = si_code; 11798 info._sifields._sigfault._addr = (cpu_env)->pc; 11799 queue_signal(cpu_env, info.si_signo, 11800 QEMU_SI_FAULT, &info); 11801 } 11802 ret = 0; 11803 } 11804 break; 11805 11806 /* case SSI_NVPAIRS: 11807 -- Used with SSIN_UACPROC to enable unaligned accesses. 11808 case SSI_IEEE_STATE_AT_SIGNAL: 11809 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11810 -- Not implemented in linux kernel 11811 */ 11812 } 11813 return ret; 11814 #endif 11815 #ifdef TARGET_NR_osf_sigprocmask 11816 /* Alpha specific. 
*/ 11817 case TARGET_NR_osf_sigprocmask: 11818 { 11819 abi_ulong mask; 11820 int how; 11821 sigset_t set, oldset; 11822 11823 switch(arg1) { 11824 case TARGET_SIG_BLOCK: 11825 how = SIG_BLOCK; 11826 break; 11827 case TARGET_SIG_UNBLOCK: 11828 how = SIG_UNBLOCK; 11829 break; 11830 case TARGET_SIG_SETMASK: 11831 how = SIG_SETMASK; 11832 break; 11833 default: 11834 return -TARGET_EINVAL; 11835 } 11836 mask = arg2; 11837 target_to_host_old_sigset(&set, &mask); 11838 ret = do_sigprocmask(how, &set, &oldset); 11839 if (!ret) { 11840 host_to_target_old_sigset(&mask, &oldset); 11841 ret = mask; 11842 } 11843 } 11844 return ret; 11845 #endif 11846 11847 #ifdef TARGET_NR_getgid32 11848 case TARGET_NR_getgid32: 11849 return get_errno(getgid()); 11850 #endif 11851 #ifdef TARGET_NR_geteuid32 11852 case TARGET_NR_geteuid32: 11853 return get_errno(geteuid()); 11854 #endif 11855 #ifdef TARGET_NR_getegid32 11856 case TARGET_NR_getegid32: 11857 return get_errno(getegid()); 11858 #endif 11859 #ifdef TARGET_NR_setreuid32 11860 case TARGET_NR_setreuid32: 11861 return get_errno(setreuid(arg1, arg2)); 11862 #endif 11863 #ifdef TARGET_NR_setregid32 11864 case TARGET_NR_setregid32: 11865 return get_errno(setregid(arg1, arg2)); 11866 #endif 11867 #ifdef TARGET_NR_getgroups32 11868 case TARGET_NR_getgroups32: 11869 { 11870 int gidsetsize = arg1; 11871 uint32_t *target_grouplist; 11872 gid_t *grouplist; 11873 int i; 11874 11875 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11876 ret = get_errno(getgroups(gidsetsize, grouplist)); 11877 if (gidsetsize == 0) 11878 return ret; 11879 if (!is_error(ret)) { 11880 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11881 if (!target_grouplist) { 11882 return -TARGET_EFAULT; 11883 } 11884 for(i = 0;i < ret; i++) 11885 target_grouplist[i] = tswap32(grouplist[i]); 11886 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11887 } 11888 } 11889 return ret; 11890 #endif 11891 #ifdef TARGET_NR_setgroups32 11892 case 
TARGET_NR_setgroups32: 11893 { 11894 int gidsetsize = arg1; 11895 uint32_t *target_grouplist; 11896 gid_t *grouplist; 11897 int i; 11898 11899 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11900 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11901 if (!target_grouplist) { 11902 return -TARGET_EFAULT; 11903 } 11904 for(i = 0;i < gidsetsize; i++) 11905 grouplist[i] = tswap32(target_grouplist[i]); 11906 unlock_user(target_grouplist, arg2, 0); 11907 return get_errno(setgroups(gidsetsize, grouplist)); 11908 } 11909 #endif 11910 #ifdef TARGET_NR_fchown32 11911 case TARGET_NR_fchown32: 11912 return get_errno(fchown(arg1, arg2, arg3)); 11913 #endif 11914 #ifdef TARGET_NR_setresuid32 11915 case TARGET_NR_setresuid32: 11916 return get_errno(sys_setresuid(arg1, arg2, arg3)); 11917 #endif 11918 #ifdef TARGET_NR_getresuid32 11919 case TARGET_NR_getresuid32: 11920 { 11921 uid_t ruid, euid, suid; 11922 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11923 if (!is_error(ret)) { 11924 if (put_user_u32(ruid, arg1) 11925 || put_user_u32(euid, arg2) 11926 || put_user_u32(suid, arg3)) 11927 return -TARGET_EFAULT; 11928 } 11929 } 11930 return ret; 11931 #endif 11932 #ifdef TARGET_NR_setresgid32 11933 case TARGET_NR_setresgid32: 11934 return get_errno(sys_setresgid(arg1, arg2, arg3)); 11935 #endif 11936 #ifdef TARGET_NR_getresgid32 11937 case TARGET_NR_getresgid32: 11938 { 11939 gid_t rgid, egid, sgid; 11940 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11941 if (!is_error(ret)) { 11942 if (put_user_u32(rgid, arg1) 11943 || put_user_u32(egid, arg2) 11944 || put_user_u32(sgid, arg3)) 11945 return -TARGET_EFAULT; 11946 } 11947 } 11948 return ret; 11949 #endif 11950 #ifdef TARGET_NR_chown32 11951 case TARGET_NR_chown32: 11952 if (!(p = lock_user_string(arg1))) 11953 return -TARGET_EFAULT; 11954 ret = get_errno(chown(p, arg2, arg3)); 11955 unlock_user(p, arg1, 0); 11956 return ret; 11957 #endif 11958 #ifdef TARGET_NR_setuid32 11959 case TARGET_NR_setuid32: 11960 
return get_errno(sys_setuid(arg1)); 11961 #endif 11962 #ifdef TARGET_NR_setgid32 11963 case TARGET_NR_setgid32: 11964 return get_errno(sys_setgid(arg1)); 11965 #endif 11966 #ifdef TARGET_NR_setfsuid32 11967 case TARGET_NR_setfsuid32: 11968 return get_errno(setfsuid(arg1)); 11969 #endif 11970 #ifdef TARGET_NR_setfsgid32 11971 case TARGET_NR_setfsgid32: 11972 return get_errno(setfsgid(arg1)); 11973 #endif 11974 #ifdef TARGET_NR_mincore 11975 case TARGET_NR_mincore: 11976 { 11977 void *a = lock_user(VERIFY_READ, arg1, arg2, 0); 11978 if (!a) { 11979 return -TARGET_ENOMEM; 11980 } 11981 p = lock_user_string(arg3); 11982 if (!p) { 11983 ret = -TARGET_EFAULT; 11984 } else { 11985 ret = get_errno(mincore(a, arg2, p)); 11986 unlock_user(p, arg3, ret); 11987 } 11988 unlock_user(a, arg1, 0); 11989 } 11990 return ret; 11991 #endif 11992 #ifdef TARGET_NR_arm_fadvise64_64 11993 case TARGET_NR_arm_fadvise64_64: 11994 /* arm_fadvise64_64 looks like fadvise64_64 but 11995 * with different argument order: fd, advice, offset, len 11996 * rather than the usual fd, offset, len, advice. 11997 * Note that offset and len are both 64-bit so appear as 11998 * pairs of 32-bit registers. 
11999 */ 12000 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 12001 target_offset64(arg5, arg6), arg2); 12002 return -host_to_target_errno(ret); 12003 #endif 12004 12005 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12006 12007 #ifdef TARGET_NR_fadvise64_64 12008 case TARGET_NR_fadvise64_64: 12009 #if defined(TARGET_PPC) || defined(TARGET_XTENSA) 12010 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 12011 ret = arg2; 12012 arg2 = arg3; 12013 arg3 = arg4; 12014 arg4 = arg5; 12015 arg5 = arg6; 12016 arg6 = ret; 12017 #else 12018 /* 6 args: fd, offset (high, low), len (high, low), advice */ 12019 if (regpairs_aligned(cpu_env, num)) { 12020 /* offset is in (3,4), len in (5,6) and advice in 7 */ 12021 arg2 = arg3; 12022 arg3 = arg4; 12023 arg4 = arg5; 12024 arg5 = arg6; 12025 arg6 = arg7; 12026 } 12027 #endif 12028 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), 12029 target_offset64(arg4, arg5), arg6); 12030 return -host_to_target_errno(ret); 12031 #endif 12032 12033 #ifdef TARGET_NR_fadvise64 12034 case TARGET_NR_fadvise64: 12035 /* 5 args: fd, offset (high, low), len, advice */ 12036 if (regpairs_aligned(cpu_env, num)) { 12037 /* offset is in (3,4), len in 5 and advice in 6 */ 12038 arg2 = arg3; 12039 arg3 = arg4; 12040 arg4 = arg5; 12041 arg5 = arg6; 12042 } 12043 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5); 12044 return -host_to_target_errno(ret); 12045 #endif 12046 12047 #else /* not a 32-bit ABI */ 12048 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 12049 #ifdef TARGET_NR_fadvise64_64 12050 case TARGET_NR_fadvise64_64: 12051 #endif 12052 #ifdef TARGET_NR_fadvise64 12053 case TARGET_NR_fadvise64: 12054 #endif 12055 #ifdef TARGET_S390X 12056 switch (arg4) { 12057 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 12058 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 12059 case 6: arg4 = POSIX_FADV_DONTNEED; break; 12060 case 7: arg4 = 
POSIX_FADV_NOREUSE; break; 12061 default: break; 12062 } 12063 #endif 12064 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 12065 #endif 12066 #endif /* end of 64-bit ABI fadvise handling */ 12067 12068 #ifdef TARGET_NR_madvise 12069 case TARGET_NR_madvise: 12070 return target_madvise(arg1, arg2, arg3); 12071 #endif 12072 #ifdef TARGET_NR_fcntl64 12073 case TARGET_NR_fcntl64: 12074 { 12075 int cmd; 12076 struct flock64 fl; 12077 from_flock64_fn *copyfrom = copy_from_user_flock64; 12078 to_flock64_fn *copyto = copy_to_user_flock64; 12079 12080 #ifdef TARGET_ARM 12081 if (!cpu_env->eabi) { 12082 copyfrom = copy_from_user_oabi_flock64; 12083 copyto = copy_to_user_oabi_flock64; 12084 } 12085 #endif 12086 12087 cmd = target_to_host_fcntl_cmd(arg2); 12088 if (cmd == -TARGET_EINVAL) { 12089 return cmd; 12090 } 12091 12092 switch(arg2) { 12093 case TARGET_F_GETLK64: 12094 ret = copyfrom(&fl, arg3); 12095 if (ret) { 12096 break; 12097 } 12098 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 12099 if (ret == 0) { 12100 ret = copyto(arg3, &fl); 12101 } 12102 break; 12103 12104 case TARGET_F_SETLK64: 12105 case TARGET_F_SETLKW64: 12106 ret = copyfrom(&fl, arg3); 12107 if (ret) { 12108 break; 12109 } 12110 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 12111 break; 12112 default: 12113 ret = do_fcntl(arg1, arg2, arg3); 12114 break; 12115 } 12116 return ret; 12117 } 12118 #endif 12119 #ifdef TARGET_NR_cacheflush 12120 case TARGET_NR_cacheflush: 12121 /* self-modifying code is handled automatically, so nothing needed */ 12122 return 0; 12123 #endif 12124 #ifdef TARGET_NR_getpagesize 12125 case TARGET_NR_getpagesize: 12126 return TARGET_PAGE_SIZE; 12127 #endif 12128 case TARGET_NR_gettid: 12129 return get_errno(sys_gettid()); 12130 #ifdef TARGET_NR_readahead 12131 case TARGET_NR_readahead: 12132 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12133 if (regpairs_aligned(cpu_env, num)) { 12134 arg2 = arg3; 12135 arg3 = arg4; 12136 arg4 = arg5; 12137 } 12138 
ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 12139 #else 12140 ret = get_errno(readahead(arg1, arg2, arg3)); 12141 #endif 12142 return ret; 12143 #endif 12144 #ifdef CONFIG_ATTR 12145 #ifdef TARGET_NR_setxattr 12146 case TARGET_NR_listxattr: 12147 case TARGET_NR_llistxattr: 12148 { 12149 void *p, *b = 0; 12150 if (arg2) { 12151 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 12152 if (!b) { 12153 return -TARGET_EFAULT; 12154 } 12155 } 12156 p = lock_user_string(arg1); 12157 if (p) { 12158 if (num == TARGET_NR_listxattr) { 12159 ret = get_errno(listxattr(p, b, arg3)); 12160 } else { 12161 ret = get_errno(llistxattr(p, b, arg3)); 12162 } 12163 } else { 12164 ret = -TARGET_EFAULT; 12165 } 12166 unlock_user(p, arg1, 0); 12167 unlock_user(b, arg2, arg3); 12168 return ret; 12169 } 12170 case TARGET_NR_flistxattr: 12171 { 12172 void *b = 0; 12173 if (arg2) { 12174 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 12175 if (!b) { 12176 return -TARGET_EFAULT; 12177 } 12178 } 12179 ret = get_errno(flistxattr(arg1, b, arg3)); 12180 unlock_user(b, arg2, arg3); 12181 return ret; 12182 } 12183 case TARGET_NR_setxattr: 12184 case TARGET_NR_lsetxattr: 12185 { 12186 void *p, *n, *v = 0; 12187 if (arg3) { 12188 v = lock_user(VERIFY_READ, arg3, arg4, 1); 12189 if (!v) { 12190 return -TARGET_EFAULT; 12191 } 12192 } 12193 p = lock_user_string(arg1); 12194 n = lock_user_string(arg2); 12195 if (p && n) { 12196 if (num == TARGET_NR_setxattr) { 12197 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 12198 } else { 12199 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 12200 } 12201 } else { 12202 ret = -TARGET_EFAULT; 12203 } 12204 unlock_user(p, arg1, 0); 12205 unlock_user(n, arg2, 0); 12206 unlock_user(v, arg3, 0); 12207 } 12208 return ret; 12209 case TARGET_NR_fsetxattr: 12210 { 12211 void *n, *v = 0; 12212 if (arg3) { 12213 v = lock_user(VERIFY_READ, arg3, arg4, 1); 12214 if (!v) { 12215 return -TARGET_EFAULT; 12216 } 12217 } 12218 n = lock_user_string(arg2); 12219 if (n) 
{ 12220 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 12221 } else { 12222 ret = -TARGET_EFAULT; 12223 } 12224 unlock_user(n, arg2, 0); 12225 unlock_user(v, arg3, 0); 12226 } 12227 return ret; 12228 case TARGET_NR_getxattr: 12229 case TARGET_NR_lgetxattr: 12230 { 12231 void *p, *n, *v = 0; 12232 if (arg3) { 12233 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 12234 if (!v) { 12235 return -TARGET_EFAULT; 12236 } 12237 } 12238 p = lock_user_string(arg1); 12239 n = lock_user_string(arg2); 12240 if (p && n) { 12241 if (num == TARGET_NR_getxattr) { 12242 ret = get_errno(getxattr(p, n, v, arg4)); 12243 } else { 12244 ret = get_errno(lgetxattr(p, n, v, arg4)); 12245 } 12246 } else { 12247 ret = -TARGET_EFAULT; 12248 } 12249 unlock_user(p, arg1, 0); 12250 unlock_user(n, arg2, 0); 12251 unlock_user(v, arg3, arg4); 12252 } 12253 return ret; 12254 case TARGET_NR_fgetxattr: 12255 { 12256 void *n, *v = 0; 12257 if (arg3) { 12258 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 12259 if (!v) { 12260 return -TARGET_EFAULT; 12261 } 12262 } 12263 n = lock_user_string(arg2); 12264 if (n) { 12265 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 12266 } else { 12267 ret = -TARGET_EFAULT; 12268 } 12269 unlock_user(n, arg2, 0); 12270 unlock_user(v, arg3, arg4); 12271 } 12272 return ret; 12273 case TARGET_NR_removexattr: 12274 case TARGET_NR_lremovexattr: 12275 { 12276 void *p, *n; 12277 p = lock_user_string(arg1); 12278 n = lock_user_string(arg2); 12279 if (p && n) { 12280 if (num == TARGET_NR_removexattr) { 12281 ret = get_errno(removexattr(p, n)); 12282 } else { 12283 ret = get_errno(lremovexattr(p, n)); 12284 } 12285 } else { 12286 ret = -TARGET_EFAULT; 12287 } 12288 unlock_user(p, arg1, 0); 12289 unlock_user(n, arg2, 0); 12290 } 12291 return ret; 12292 case TARGET_NR_fremovexattr: 12293 { 12294 void *n; 12295 n = lock_user_string(arg2); 12296 if (n) { 12297 ret = get_errno(fremovexattr(arg1, n)); 12298 } else { 12299 ret = -TARGET_EFAULT; 12300 } 12301 unlock_user(n, arg2, 0); 12302 } 
12303 return ret; 12304 #endif 12305 #endif /* CONFIG_ATTR */ 12306 #ifdef TARGET_NR_set_thread_area 12307 case TARGET_NR_set_thread_area: 12308 #if defined(TARGET_MIPS) 12309 cpu_env->active_tc.CP0_UserLocal = arg1; 12310 return 0; 12311 #elif defined(TARGET_CRIS) 12312 if (arg1 & 0xff) 12313 ret = -TARGET_EINVAL; 12314 else { 12315 cpu_env->pregs[PR_PID] = arg1; 12316 ret = 0; 12317 } 12318 return ret; 12319 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 12320 return do_set_thread_area(cpu_env, arg1); 12321 #elif defined(TARGET_M68K) 12322 { 12323 TaskState *ts = cpu->opaque; 12324 ts->tp_value = arg1; 12325 return 0; 12326 } 12327 #else 12328 return -TARGET_ENOSYS; 12329 #endif 12330 #endif 12331 #ifdef TARGET_NR_get_thread_area 12332 case TARGET_NR_get_thread_area: 12333 #if defined(TARGET_I386) && defined(TARGET_ABI32) 12334 return do_get_thread_area(cpu_env, arg1); 12335 #elif defined(TARGET_M68K) 12336 { 12337 TaskState *ts = cpu->opaque; 12338 return ts->tp_value; 12339 } 12340 #else 12341 return -TARGET_ENOSYS; 12342 #endif 12343 #endif 12344 #ifdef TARGET_NR_getdomainname 12345 case TARGET_NR_getdomainname: 12346 return -TARGET_ENOSYS; 12347 #endif 12348 12349 #ifdef TARGET_NR_clock_settime 12350 case TARGET_NR_clock_settime: 12351 { 12352 struct timespec ts; 12353 12354 ret = target_to_host_timespec(&ts, arg2); 12355 if (!is_error(ret)) { 12356 ret = get_errno(clock_settime(arg1, &ts)); 12357 } 12358 return ret; 12359 } 12360 #endif 12361 #ifdef TARGET_NR_clock_settime64 12362 case TARGET_NR_clock_settime64: 12363 { 12364 struct timespec ts; 12365 12366 ret = target_to_host_timespec64(&ts, arg2); 12367 if (!is_error(ret)) { 12368 ret = get_errno(clock_settime(arg1, &ts)); 12369 } 12370 return ret; 12371 } 12372 #endif 12373 #ifdef TARGET_NR_clock_gettime 12374 case TARGET_NR_clock_gettime: 12375 { 12376 struct timespec ts; 12377 ret = get_errno(clock_gettime(arg1, &ts)); 12378 if (!is_error(ret)) { 12379 ret = host_to_target_timespec(arg2, &ts); 
12380 } 12381 return ret; 12382 } 12383 #endif 12384 #ifdef TARGET_NR_clock_gettime64 12385 case TARGET_NR_clock_gettime64: 12386 { 12387 struct timespec ts; 12388 ret = get_errno(clock_gettime(arg1, &ts)); 12389 if (!is_error(ret)) { 12390 ret = host_to_target_timespec64(arg2, &ts); 12391 } 12392 return ret; 12393 } 12394 #endif 12395 #ifdef TARGET_NR_clock_getres 12396 case TARGET_NR_clock_getres: 12397 { 12398 struct timespec ts; 12399 ret = get_errno(clock_getres(arg1, &ts)); 12400 if (!is_error(ret)) { 12401 host_to_target_timespec(arg2, &ts); 12402 } 12403 return ret; 12404 } 12405 #endif 12406 #ifdef TARGET_NR_clock_getres_time64 12407 case TARGET_NR_clock_getres_time64: 12408 { 12409 struct timespec ts; 12410 ret = get_errno(clock_getres(arg1, &ts)); 12411 if (!is_error(ret)) { 12412 host_to_target_timespec64(arg2, &ts); 12413 } 12414 return ret; 12415 } 12416 #endif 12417 #ifdef TARGET_NR_clock_nanosleep 12418 case TARGET_NR_clock_nanosleep: 12419 { 12420 struct timespec ts; 12421 if (target_to_host_timespec(&ts, arg3)) { 12422 return -TARGET_EFAULT; 12423 } 12424 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12425 &ts, arg4 ? &ts : NULL)); 12426 /* 12427 * if the call is interrupted by a signal handler, it fails 12428 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not 12429 * TIMER_ABSTIME, it returns the remaining unslept time in arg4. 12430 */ 12431 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12432 host_to_target_timespec(arg4, &ts)) { 12433 return -TARGET_EFAULT; 12434 } 12435 12436 return ret; 12437 } 12438 #endif 12439 #ifdef TARGET_NR_clock_nanosleep_time64 12440 case TARGET_NR_clock_nanosleep_time64: 12441 { 12442 struct timespec ts; 12443 12444 if (target_to_host_timespec64(&ts, arg3)) { 12445 return -TARGET_EFAULT; 12446 } 12447 12448 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12449 &ts, arg4 ? 
&ts : NULL)); 12450 12451 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12452 host_to_target_timespec64(arg4, &ts)) { 12453 return -TARGET_EFAULT; 12454 } 12455 return ret; 12456 } 12457 #endif 12458 12459 #if defined(TARGET_NR_set_tid_address) 12460 case TARGET_NR_set_tid_address: 12461 { 12462 TaskState *ts = cpu->opaque; 12463 ts->child_tidptr = arg1; 12464 /* do not call host set_tid_address() syscall, instead return tid() */ 12465 return get_errno(sys_gettid()); 12466 } 12467 #endif 12468 12469 case TARGET_NR_tkill: 12470 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12471 12472 case TARGET_NR_tgkill: 12473 return get_errno(safe_tgkill((int)arg1, (int)arg2, 12474 target_to_host_signal(arg3))); 12475 12476 #ifdef TARGET_NR_set_robust_list 12477 case TARGET_NR_set_robust_list: 12478 case TARGET_NR_get_robust_list: 12479 /* The ABI for supporting robust futexes has userspace pass 12480 * the kernel a pointer to a linked list which is updated by 12481 * userspace after the syscall; the list is walked by the kernel 12482 * when the thread exits. Since the linked list in QEMU guest 12483 * memory isn't a valid linked list for the host and we have 12484 * no way to reliably intercept the thread-death event, we can't 12485 * support these. Silently return ENOSYS so that guest userspace 12486 * falls back to a non-robust futex implementation (which should 12487 * be OK except in the corner case of the guest crashing while 12488 * holding a mutex that is shared with another process via 12489 * shared memory). 
12490 */ 12491 return -TARGET_ENOSYS; 12492 #endif 12493 12494 #if defined(TARGET_NR_utimensat) 12495 case TARGET_NR_utimensat: 12496 { 12497 struct timespec *tsp, ts[2]; 12498 if (!arg3) { 12499 tsp = NULL; 12500 } else { 12501 if (target_to_host_timespec(ts, arg3)) { 12502 return -TARGET_EFAULT; 12503 } 12504 if (target_to_host_timespec(ts + 1, arg3 + 12505 sizeof(struct target_timespec))) { 12506 return -TARGET_EFAULT; 12507 } 12508 tsp = ts; 12509 } 12510 if (!arg2) 12511 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12512 else { 12513 if (!(p = lock_user_string(arg2))) { 12514 return -TARGET_EFAULT; 12515 } 12516 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12517 unlock_user(p, arg2, 0); 12518 } 12519 } 12520 return ret; 12521 #endif 12522 #ifdef TARGET_NR_utimensat_time64 12523 case TARGET_NR_utimensat_time64: 12524 { 12525 struct timespec *tsp, ts[2]; 12526 if (!arg3) { 12527 tsp = NULL; 12528 } else { 12529 if (target_to_host_timespec64(ts, arg3)) { 12530 return -TARGET_EFAULT; 12531 } 12532 if (target_to_host_timespec64(ts + 1, arg3 + 12533 sizeof(struct target__kernel_timespec))) { 12534 return -TARGET_EFAULT; 12535 } 12536 tsp = ts; 12537 } 12538 if (!arg2) 12539 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12540 else { 12541 p = lock_user_string(arg2); 12542 if (!p) { 12543 return -TARGET_EFAULT; 12544 } 12545 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12546 unlock_user(p, arg2, 0); 12547 } 12548 } 12549 return ret; 12550 #endif 12551 #ifdef TARGET_NR_futex 12552 case TARGET_NR_futex: 12553 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6); 12554 #endif 12555 #ifdef TARGET_NR_futex_time64 12556 case TARGET_NR_futex_time64: 12557 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6); 12558 #endif 12559 #ifdef CONFIG_INOTIFY 12560 #if defined(TARGET_NR_inotify_init) 12561 case TARGET_NR_inotify_init: 12562 ret = get_errno(inotify_init()); 12563 if (ret >= 0) { 12564 
fd_trans_register(ret, &target_inotify_trans); 12565 } 12566 return ret; 12567 #endif 12568 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1) 12569 case TARGET_NR_inotify_init1: 12570 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1, 12571 fcntl_flags_tbl))); 12572 if (ret >= 0) { 12573 fd_trans_register(ret, &target_inotify_trans); 12574 } 12575 return ret; 12576 #endif 12577 #if defined(TARGET_NR_inotify_add_watch) 12578 case TARGET_NR_inotify_add_watch: 12579 p = lock_user_string(arg2); 12580 ret = get_errno(inotify_add_watch(arg1, path(p), arg3)); 12581 unlock_user(p, arg2, 0); 12582 return ret; 12583 #endif 12584 #if defined(TARGET_NR_inotify_rm_watch) 12585 case TARGET_NR_inotify_rm_watch: 12586 return get_errno(inotify_rm_watch(arg1, arg2)); 12587 #endif 12588 #endif 12589 12590 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12591 case TARGET_NR_mq_open: 12592 { 12593 struct mq_attr posix_mq_attr; 12594 struct mq_attr *pposix_mq_attr; 12595 int host_flags; 12596 12597 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12598 pposix_mq_attr = NULL; 12599 if (arg4) { 12600 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12601 return -TARGET_EFAULT; 12602 } 12603 pposix_mq_attr = &posix_mq_attr; 12604 } 12605 p = lock_user_string(arg1 - 1); 12606 if (!p) { 12607 return -TARGET_EFAULT; 12608 } 12609 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12610 unlock_user (p, arg1, 0); 12611 } 12612 return ret; 12613 12614 case TARGET_NR_mq_unlink: 12615 p = lock_user_string(arg1 - 1); 12616 if (!p) { 12617 return -TARGET_EFAULT; 12618 } 12619 ret = get_errno(mq_unlink(p)); 12620 unlock_user (p, arg1, 0); 12621 return ret; 12622 12623 #ifdef TARGET_NR_mq_timedsend 12624 case TARGET_NR_mq_timedsend: 12625 { 12626 struct timespec ts; 12627 12628 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12629 if (arg5 != 0) { 12630 if (target_to_host_timespec(&ts, arg5)) { 12631 return -TARGET_EFAULT; 12632 } 12633 ret = 
get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12634 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12635 return -TARGET_EFAULT; 12636 } 12637 } else { 12638 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12639 } 12640 unlock_user (p, arg2, arg3); 12641 } 12642 return ret; 12643 #endif 12644 #ifdef TARGET_NR_mq_timedsend_time64 12645 case TARGET_NR_mq_timedsend_time64: 12646 { 12647 struct timespec ts; 12648 12649 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12650 if (arg5 != 0) { 12651 if (target_to_host_timespec64(&ts, arg5)) { 12652 return -TARGET_EFAULT; 12653 } 12654 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12655 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12656 return -TARGET_EFAULT; 12657 } 12658 } else { 12659 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12660 } 12661 unlock_user(p, arg2, arg3); 12662 } 12663 return ret; 12664 #endif 12665 12666 #ifdef TARGET_NR_mq_timedreceive 12667 case TARGET_NR_mq_timedreceive: 12668 { 12669 struct timespec ts; 12670 unsigned int prio; 12671 12672 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12673 if (arg5 != 0) { 12674 if (target_to_host_timespec(&ts, arg5)) { 12675 return -TARGET_EFAULT; 12676 } 12677 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12678 &prio, &ts)); 12679 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12680 return -TARGET_EFAULT; 12681 } 12682 } else { 12683 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12684 &prio, NULL)); 12685 } 12686 unlock_user (p, arg2, arg3); 12687 if (arg4 != 0) 12688 put_user_u32(prio, arg4); 12689 } 12690 return ret; 12691 #endif 12692 #ifdef TARGET_NR_mq_timedreceive_time64 12693 case TARGET_NR_mq_timedreceive_time64: 12694 { 12695 struct timespec ts; 12696 unsigned int prio; 12697 12698 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12699 if (arg5 != 0) { 12700 if (target_to_host_timespec64(&ts, arg5)) { 12701 return -TARGET_EFAULT; 12702 } 12703 ret = 
get_errno(safe_mq_timedreceive(arg1, p, arg3, 12704 &prio, &ts)); 12705 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12706 return -TARGET_EFAULT; 12707 } 12708 } else { 12709 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12710 &prio, NULL)); 12711 } 12712 unlock_user(p, arg2, arg3); 12713 if (arg4 != 0) { 12714 put_user_u32(prio, arg4); 12715 } 12716 } 12717 return ret; 12718 #endif 12719 12720 /* Not implemented for now... */ 12721 /* case TARGET_NR_mq_notify: */ 12722 /* break; */ 12723 12724 case TARGET_NR_mq_getsetattr: 12725 { 12726 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12727 ret = 0; 12728 if (arg2 != 0) { 12729 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12730 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, 12731 &posix_mq_attr_out)); 12732 } else if (arg3 != 0) { 12733 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out)); 12734 } 12735 if (ret == 0 && arg3 != 0) { 12736 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12737 } 12738 } 12739 return ret; 12740 #endif 12741 12742 #ifdef CONFIG_SPLICE 12743 #ifdef TARGET_NR_tee 12744 case TARGET_NR_tee: 12745 { 12746 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12747 } 12748 return ret; 12749 #endif 12750 #ifdef TARGET_NR_splice 12751 case TARGET_NR_splice: 12752 { 12753 loff_t loff_in, loff_out; 12754 loff_t *ploff_in = NULL, *ploff_out = NULL; 12755 if (arg2) { 12756 if (get_user_u64(loff_in, arg2)) { 12757 return -TARGET_EFAULT; 12758 } 12759 ploff_in = &loff_in; 12760 } 12761 if (arg4) { 12762 if (get_user_u64(loff_out, arg4)) { 12763 return -TARGET_EFAULT; 12764 } 12765 ploff_out = &loff_out; 12766 } 12767 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12768 if (arg2) { 12769 if (put_user_u64(loff_in, arg2)) { 12770 return -TARGET_EFAULT; 12771 } 12772 } 12773 if (arg4) { 12774 if (put_user_u64(loff_out, arg4)) { 12775 return -TARGET_EFAULT; 12776 } 12777 } 12778 } 12779 return ret; 12780 #endif 12781 #ifdef TARGET_NR_vmsplice 12782 case 
TARGET_NR_vmsplice: 12783 { 12784 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12785 if (vec != NULL) { 12786 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12787 unlock_iovec(vec, arg2, arg3, 0); 12788 } else { 12789 ret = -host_to_target_errno(errno); 12790 } 12791 } 12792 return ret; 12793 #endif 12794 #endif /* CONFIG_SPLICE */ 12795 #ifdef CONFIG_EVENTFD 12796 #if defined(TARGET_NR_eventfd) 12797 case TARGET_NR_eventfd: 12798 ret = get_errno(eventfd(arg1, 0)); 12799 if (ret >= 0) { 12800 fd_trans_register(ret, &target_eventfd_trans); 12801 } 12802 return ret; 12803 #endif 12804 #if defined(TARGET_NR_eventfd2) 12805 case TARGET_NR_eventfd2: 12806 { 12807 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)); 12808 if (arg2 & TARGET_O_NONBLOCK) { 12809 host_flags |= O_NONBLOCK; 12810 } 12811 if (arg2 & TARGET_O_CLOEXEC) { 12812 host_flags |= O_CLOEXEC; 12813 } 12814 ret = get_errno(eventfd(arg1, host_flags)); 12815 if (ret >= 0) { 12816 fd_trans_register(ret, &target_eventfd_trans); 12817 } 12818 return ret; 12819 } 12820 #endif 12821 #endif /* CONFIG_EVENTFD */ 12822 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12823 case TARGET_NR_fallocate: 12824 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12825 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12826 target_offset64(arg5, arg6))); 12827 #else 12828 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12829 #endif 12830 return ret; 12831 #endif 12832 #if defined(CONFIG_SYNC_FILE_RANGE) 12833 #if defined(TARGET_NR_sync_file_range) 12834 case TARGET_NR_sync_file_range: 12835 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12836 #if defined(TARGET_MIPS) 12837 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12838 target_offset64(arg5, arg6), arg7)); 12839 #else 12840 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12841 target_offset64(arg4, arg5), arg6)); 12842 #endif /* !TARGET_MIPS 
*/ 12843 #else 12844 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12845 #endif 12846 return ret; 12847 #endif 12848 #if defined(TARGET_NR_sync_file_range2) || \ 12849 defined(TARGET_NR_arm_sync_file_range) 12850 #if defined(TARGET_NR_sync_file_range2) 12851 case TARGET_NR_sync_file_range2: 12852 #endif 12853 #if defined(TARGET_NR_arm_sync_file_range) 12854 case TARGET_NR_arm_sync_file_range: 12855 #endif 12856 /* This is like sync_file_range but the arguments are reordered */ 12857 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12858 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12859 target_offset64(arg5, arg6), arg2)); 12860 #else 12861 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12862 #endif 12863 return ret; 12864 #endif 12865 #endif 12866 #if defined(TARGET_NR_signalfd4) 12867 case TARGET_NR_signalfd4: 12868 return do_signalfd4(arg1, arg2, arg4); 12869 #endif 12870 #if defined(TARGET_NR_signalfd) 12871 case TARGET_NR_signalfd: 12872 return do_signalfd4(arg1, arg2, 0); 12873 #endif 12874 #if defined(CONFIG_EPOLL) 12875 #if defined(TARGET_NR_epoll_create) 12876 case TARGET_NR_epoll_create: 12877 return get_errno(epoll_create(arg1)); 12878 #endif 12879 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12880 case TARGET_NR_epoll_create1: 12881 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 12882 #endif 12883 #if defined(TARGET_NR_epoll_ctl) 12884 case TARGET_NR_epoll_ctl: 12885 { 12886 struct epoll_event ep; 12887 struct epoll_event *epp = 0; 12888 if (arg4) { 12889 if (arg2 != EPOLL_CTL_DEL) { 12890 struct target_epoll_event *target_ep; 12891 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12892 return -TARGET_EFAULT; 12893 } 12894 ep.events = tswap32(target_ep->events); 12895 /* 12896 * The epoll_data_t union is just opaque data to the kernel, 12897 * so we transfer all 64 bits across and need not worry what 12898 * actual data type it is. 
12899 */ 12900 ep.data.u64 = tswap64(target_ep->data.u64); 12901 unlock_user_struct(target_ep, arg4, 0); 12902 } 12903 /* 12904 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a 12905 * non-null pointer, even though this argument is ignored. 12906 * 12907 */ 12908 epp = &ep; 12909 } 12910 return get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12911 } 12912 #endif 12913 12914 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12915 #if defined(TARGET_NR_epoll_wait) 12916 case TARGET_NR_epoll_wait: 12917 #endif 12918 #if defined(TARGET_NR_epoll_pwait) 12919 case TARGET_NR_epoll_pwait: 12920 #endif 12921 { 12922 struct target_epoll_event *target_ep; 12923 struct epoll_event *ep; 12924 int epfd = arg1; 12925 int maxevents = arg3; 12926 int timeout = arg4; 12927 12928 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12929 return -TARGET_EINVAL; 12930 } 12931 12932 target_ep = lock_user(VERIFY_WRITE, arg2, 12933 maxevents * sizeof(struct target_epoll_event), 1); 12934 if (!target_ep) { 12935 return -TARGET_EFAULT; 12936 } 12937 12938 ep = g_try_new(struct epoll_event, maxevents); 12939 if (!ep) { 12940 unlock_user(target_ep, arg2, 0); 12941 return -TARGET_ENOMEM; 12942 } 12943 12944 switch (num) { 12945 #if defined(TARGET_NR_epoll_pwait) 12946 case TARGET_NR_epoll_pwait: 12947 { 12948 sigset_t *set = NULL; 12949 12950 if (arg5) { 12951 ret = process_sigsuspend_mask(&set, arg5, arg6); 12952 if (ret != 0) { 12953 break; 12954 } 12955 } 12956 12957 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12958 set, SIGSET_T_SIZE)); 12959 12960 if (set) { 12961 finish_sigsuspend_mask(ret); 12962 } 12963 break; 12964 } 12965 #endif 12966 #if defined(TARGET_NR_epoll_wait) 12967 case TARGET_NR_epoll_wait: 12968 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12969 NULL, 0)); 12970 break; 12971 #endif 12972 default: 12973 ret = -TARGET_ENOSYS; 12974 } 12975 if (!is_error(ret)) { 12976 int i; 12977 for (i = 0; i < ret; i++) { 
12978 target_ep[i].events = tswap32(ep[i].events); 12979 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12980 } 12981 unlock_user(target_ep, arg2, 12982 ret * sizeof(struct target_epoll_event)); 12983 } else { 12984 unlock_user(target_ep, arg2, 0); 12985 } 12986 g_free(ep); 12987 return ret; 12988 } 12989 #endif 12990 #endif 12991 #ifdef TARGET_NR_prlimit64 12992 case TARGET_NR_prlimit64: 12993 { 12994 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12995 struct target_rlimit64 *target_rnew, *target_rold; 12996 struct host_rlimit64 rnew, rold, *rnewp = 0; 12997 int resource = target_to_host_resource(arg2); 12998 12999 if (arg3 && (resource != RLIMIT_AS && 13000 resource != RLIMIT_DATA && 13001 resource != RLIMIT_STACK)) { 13002 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 13003 return -TARGET_EFAULT; 13004 } 13005 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 13006 rnew.rlim_max = tswap64(target_rnew->rlim_max); 13007 unlock_user_struct(target_rnew, arg3, 0); 13008 rnewp = &rnew; 13009 } 13010 13011 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 13012 if (!is_error(ret) && arg4) { 13013 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 13014 return -TARGET_EFAULT; 13015 } 13016 target_rold->rlim_cur = tswap64(rold.rlim_cur); 13017 target_rold->rlim_max = tswap64(rold.rlim_max); 13018 unlock_user_struct(target_rold, arg4, 1); 13019 } 13020 return ret; 13021 } 13022 #endif 13023 #ifdef TARGET_NR_gethostname 13024 case TARGET_NR_gethostname: 13025 { 13026 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 13027 if (name) { 13028 ret = get_errno(gethostname(name, arg2)); 13029 unlock_user(name, arg1, arg2); 13030 } else { 13031 ret = -TARGET_EFAULT; 13032 } 13033 return ret; 13034 } 13035 #endif 13036 #ifdef TARGET_NR_atomic_cmpxchg_32 13037 case TARGET_NR_atomic_cmpxchg_32: 13038 { 13039 /* should use start_exclusive from main.c */ 13040 abi_ulong mem_value; 13041 if (get_user_u32(mem_value, arg6)) { 13042 target_siginfo_t info; 13043 info.si_signo = SIGSEGV; 13044 info.si_errno = 0; 13045 info.si_code = TARGET_SEGV_MAPERR; 13046 info._sifields._sigfault._addr = arg6; 13047 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info); 13048 ret = 0xdeadbeef; 13049 13050 } 13051 if (mem_value == arg2) 13052 put_user_u32(arg1, arg6); 13053 return mem_value; 13054 } 13055 #endif 13056 #ifdef TARGET_NR_atomic_barrier 13057 case TARGET_NR_atomic_barrier: 13058 /* Like the kernel implementation and the 13059 qemu arm barrier, no-op this? 
*/ 13060 return 0; 13061 #endif 13062 13063 #ifdef TARGET_NR_timer_create 13064 case TARGET_NR_timer_create: 13065 { 13066 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 13067 13068 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 13069 13070 int clkid = arg1; 13071 int timer_index = next_free_host_timer(); 13072 13073 if (timer_index < 0) { 13074 ret = -TARGET_EAGAIN; 13075 } else { 13076 timer_t *phtimer = g_posix_timers + timer_index; 13077 13078 if (arg2) { 13079 phost_sevp = &host_sevp; 13080 ret = target_to_host_sigevent(phost_sevp, arg2); 13081 if (ret != 0) { 13082 free_host_timer_slot(timer_index); 13083 return ret; 13084 } 13085 } 13086 13087 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 13088 if (ret) { 13089 free_host_timer_slot(timer_index); 13090 } else { 13091 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 13092 timer_delete(*phtimer); 13093 free_host_timer_slot(timer_index); 13094 return -TARGET_EFAULT; 13095 } 13096 } 13097 } 13098 return ret; 13099 } 13100 #endif 13101 13102 #ifdef TARGET_NR_timer_settime 13103 case TARGET_NR_timer_settime: 13104 { 13105 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 13106 * struct itimerspec * old_value */ 13107 target_timer_t timerid = get_timer_id(arg1); 13108 13109 if (timerid < 0) { 13110 ret = timerid; 13111 } else if (arg3 == 0) { 13112 ret = -TARGET_EINVAL; 13113 } else { 13114 timer_t htimer = g_posix_timers[timerid]; 13115 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 13116 13117 if (target_to_host_itimerspec(&hspec_new, arg3)) { 13118 return -TARGET_EFAULT; 13119 } 13120 ret = get_errno( 13121 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 13122 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 13123 return -TARGET_EFAULT; 13124 } 13125 } 13126 return ret; 13127 } 13128 #endif 13129 13130 #ifdef TARGET_NR_timer_settime64 13131 case TARGET_NR_timer_settime64: 13132 { 13133 target_timer_t 
timerid = get_timer_id(arg1); 13134 13135 if (timerid < 0) { 13136 ret = timerid; 13137 } else if (arg3 == 0) { 13138 ret = -TARGET_EINVAL; 13139 } else { 13140 timer_t htimer = g_posix_timers[timerid]; 13141 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 13142 13143 if (target_to_host_itimerspec64(&hspec_new, arg3)) { 13144 return -TARGET_EFAULT; 13145 } 13146 ret = get_errno( 13147 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 13148 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) { 13149 return -TARGET_EFAULT; 13150 } 13151 } 13152 return ret; 13153 } 13154 #endif 13155 13156 #ifdef TARGET_NR_timer_gettime 13157 case TARGET_NR_timer_gettime: 13158 { 13159 /* args: timer_t timerid, struct itimerspec *curr_value */ 13160 target_timer_t timerid = get_timer_id(arg1); 13161 13162 if (timerid < 0) { 13163 ret = timerid; 13164 } else if (!arg2) { 13165 ret = -TARGET_EFAULT; 13166 } else { 13167 timer_t htimer = g_posix_timers[timerid]; 13168 struct itimerspec hspec; 13169 ret = get_errno(timer_gettime(htimer, &hspec)); 13170 13171 if (host_to_target_itimerspec(arg2, &hspec)) { 13172 ret = -TARGET_EFAULT; 13173 } 13174 } 13175 return ret; 13176 } 13177 #endif 13178 13179 #ifdef TARGET_NR_timer_gettime64 13180 case TARGET_NR_timer_gettime64: 13181 { 13182 /* args: timer_t timerid, struct itimerspec64 *curr_value */ 13183 target_timer_t timerid = get_timer_id(arg1); 13184 13185 if (timerid < 0) { 13186 ret = timerid; 13187 } else if (!arg2) { 13188 ret = -TARGET_EFAULT; 13189 } else { 13190 timer_t htimer = g_posix_timers[timerid]; 13191 struct itimerspec hspec; 13192 ret = get_errno(timer_gettime(htimer, &hspec)); 13193 13194 if (host_to_target_itimerspec64(arg2, &hspec)) { 13195 ret = -TARGET_EFAULT; 13196 } 13197 } 13198 return ret; 13199 } 13200 #endif 13201 13202 #ifdef TARGET_NR_timer_getoverrun 13203 case TARGET_NR_timer_getoverrun: 13204 { 13205 /* args: timer_t timerid */ 13206 target_timer_t timerid = get_timer_id(arg1); 13207 13208 
if (timerid < 0) { 13209 ret = timerid; 13210 } else { 13211 timer_t htimer = g_posix_timers[timerid]; 13212 ret = get_errno(timer_getoverrun(htimer)); 13213 } 13214 return ret; 13215 } 13216 #endif 13217 13218 #ifdef TARGET_NR_timer_delete 13219 case TARGET_NR_timer_delete: 13220 { 13221 /* args: timer_t timerid */ 13222 target_timer_t timerid = get_timer_id(arg1); 13223 13224 if (timerid < 0) { 13225 ret = timerid; 13226 } else { 13227 timer_t htimer = g_posix_timers[timerid]; 13228 ret = get_errno(timer_delete(htimer)); 13229 free_host_timer_slot(timerid); 13230 } 13231 return ret; 13232 } 13233 #endif 13234 13235 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 13236 case TARGET_NR_timerfd_create: 13237 return get_errno(timerfd_create(arg1, 13238 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 13239 #endif 13240 13241 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 13242 case TARGET_NR_timerfd_gettime: 13243 { 13244 struct itimerspec its_curr; 13245 13246 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 13247 13248 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 13249 return -TARGET_EFAULT; 13250 } 13251 } 13252 return ret; 13253 #endif 13254 13255 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD) 13256 case TARGET_NR_timerfd_gettime64: 13257 { 13258 struct itimerspec its_curr; 13259 13260 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 13261 13262 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) { 13263 return -TARGET_EFAULT; 13264 } 13265 } 13266 return ret; 13267 #endif 13268 13269 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 13270 case TARGET_NR_timerfd_settime: 13271 { 13272 struct itimerspec its_new, its_old, *p_new; 13273 13274 if (arg3) { 13275 if (target_to_host_itimerspec(&its_new, arg3)) { 13276 return -TARGET_EFAULT; 13277 } 13278 p_new = &its_new; 13279 } else { 13280 p_new = NULL; 13281 } 13282 13283 ret = get_errno(timerfd_settime(arg1, arg2, p_new, 
&its_old)); 13284 13285 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 13286 return -TARGET_EFAULT; 13287 } 13288 } 13289 return ret; 13290 #endif 13291 13292 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD) 13293 case TARGET_NR_timerfd_settime64: 13294 { 13295 struct itimerspec its_new, its_old, *p_new; 13296 13297 if (arg3) { 13298 if (target_to_host_itimerspec64(&its_new, arg3)) { 13299 return -TARGET_EFAULT; 13300 } 13301 p_new = &its_new; 13302 } else { 13303 p_new = NULL; 13304 } 13305 13306 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 13307 13308 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) { 13309 return -TARGET_EFAULT; 13310 } 13311 } 13312 return ret; 13313 #endif 13314 13315 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 13316 case TARGET_NR_ioprio_get: 13317 return get_errno(ioprio_get(arg1, arg2)); 13318 #endif 13319 13320 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 13321 case TARGET_NR_ioprio_set: 13322 return get_errno(ioprio_set(arg1, arg2, arg3)); 13323 #endif 13324 13325 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 13326 case TARGET_NR_setns: 13327 return get_errno(setns(arg1, arg2)); 13328 #endif 13329 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 13330 case TARGET_NR_unshare: 13331 return get_errno(unshare(arg1)); 13332 #endif 13333 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 13334 case TARGET_NR_kcmp: 13335 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 13336 #endif 13337 #ifdef TARGET_NR_swapcontext 13338 case TARGET_NR_swapcontext: 13339 /* PowerPC specific. 
*/ 13340 return do_swapcontext(cpu_env, arg1, arg2, arg3); 13341 #endif 13342 #ifdef TARGET_NR_memfd_create 13343 case TARGET_NR_memfd_create: 13344 p = lock_user_string(arg1); 13345 if (!p) { 13346 return -TARGET_EFAULT; 13347 } 13348 ret = get_errno(memfd_create(p, arg2)); 13349 fd_trans_unregister(ret); 13350 unlock_user(p, arg1, 0); 13351 return ret; 13352 #endif 13353 #if defined TARGET_NR_membarrier && defined __NR_membarrier 13354 case TARGET_NR_membarrier: 13355 return get_errno(membarrier(arg1, arg2)); 13356 #endif 13357 13358 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range) 13359 case TARGET_NR_copy_file_range: 13360 { 13361 loff_t inoff, outoff; 13362 loff_t *pinoff = NULL, *poutoff = NULL; 13363 13364 if (arg2) { 13365 if (get_user_u64(inoff, arg2)) { 13366 return -TARGET_EFAULT; 13367 } 13368 pinoff = &inoff; 13369 } 13370 if (arg4) { 13371 if (get_user_u64(outoff, arg4)) { 13372 return -TARGET_EFAULT; 13373 } 13374 poutoff = &outoff; 13375 } 13376 /* Do not sign-extend the count parameter. 
             */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                /* Write back the updated file offsets the kernel advanced. */
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* unlock_user() tolerates a NULL host pointer, so no guards. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}

/*
 * Top-level guest syscall entry point: thin wrapper around do_syscall1()
 * that adds instrumentation around the actual emulation.
 *
 * In order, it:
 *   - (DEBUG_ERESTARTSYS builds only) forces every other syscall to report
 *     -QEMU_ERESTARTSYS once, to exercise each architecture's
 *     syscall-restart path in its cpu main loop;
 *   - records the syscall entry via record_syscall_start();
 *   - emits strace-style logging of arguments and of the return value
 *     when the LOG_STRACE log mask is enabled;
 *   - delegates the real work to do_syscall1() with all eight possible
 *     argument registers;
 *   - records the result via record_syscall_return().
 *
 * Returns the (target-errno-encoded) result from do_syscall1() unchanged.
 */
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        /* Static toggle: odd invocations restart, even ones proceed.
         * NOTE(review): not thread-safe, but this is debug-only code.
         */
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}