| File: | linux-user/mmap.c |
| Location: | line 747, column 13 |
| Description: | Value stored to 'host_addr' is never read |
| 1 | /* |
| 2 | * mmap support for qemu |
| 3 | * |
| 4 | * Copyright (c) 2003 Fabrice Bellard |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; either version 2 of the License, or |
| 9 | * (at your option) any later version. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"
| 33 | |
| 34 | //#define DEBUG_MMAP |
| 35 | |
/* Serializes updates to the guest page-flag tables and host mappings.
   mmap_lock_count is per-thread, making the lock recursively acquirable. */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
| 38 | |
| 39 | void mmap_lock(void) |
| 40 | { |
| 41 | if (mmap_lock_count++ == 0) { |
| 42 | pthread_mutex_lock(&mmap_mutex); |
| 43 | } |
| 44 | } |
| 45 | |
| 46 | void mmap_unlock(void) |
| 47 | { |
| 48 | if (--mmap_lock_count == 0) { |
| 49 | pthread_mutex_unlock(&mmap_mutex); |
| 50 | } |
| 51 | } |
| 52 | |
| 53 | /* Grab lock to make sure things are in a consistent state after fork(). */ |
| 54 | void mmap_fork_start(void) |
| 55 | { |
| 56 | if (mmap_lock_count) |
| 57 | abort(); |
| 58 | pthread_mutex_lock(&mmap_mutex); |
| 59 | } |
| 60 | |
| 61 | void mmap_fork_end(int child) |
| 62 | { |
| 63 | if (child) |
| 64 | pthread_mutex_init(&mmap_mutex, NULL((void*)0)); |
| 65 | else |
| 66 | pthread_mutex_unlock(&mmap_mutex); |
| 67 | } |
| 68 | |
| 69 | /* NOTE: all the constants are the HOST ones, but addresses are target. */ |
| 70 | int target_mprotect(abi_ulong start, abi_ulong len, int prot) |
| 71 | { |
| 72 | abi_ulong end, host_start, host_end, addr; |
| 73 | int prot1, ret; |
| 74 | |
| 75 | #ifdef DEBUG_MMAP |
| 76 | printf("mprotect: start=0x" TARGET_ABI_FMT_lx"%08x" |
| 77 | "len=0x" TARGET_ABI_FMT_lx"%08x" " prot=%c%c%c\n", start, len, |
| 78 | prot & PROT_READ0x1 ? 'r' : '-', |
| 79 | prot & PROT_WRITE0x2 ? 'w' : '-', |
| 80 | prot & PROT_EXEC0x4 ? 'x' : '-'); |
| 81 | #endif |
| 82 | |
| 83 | if ((start & ~TARGET_PAGE_MASK~((1 << 12) - 1)) != 0) |
| 84 | return -EINVAL22; |
| 85 | len = TARGET_PAGE_ALIGN(len)(((len) + (1 << 12) - 1) & ~((1 << 12) - 1)); |
| 86 | end = start + len; |
| 87 | if (end < start) |
| 88 | return -EINVAL22; |
| 89 | prot &= PROT_READ0x1 | PROT_WRITE0x2 | PROT_EXEC0x4; |
| 90 | if (len == 0) |
| 91 | return 0; |
| 92 | |
| 93 | mmap_lock(); |
| 94 | host_start = start & qemu_host_page_mask; |
| 95 | host_end = HOST_PAGE_ALIGN(end)(((end) + qemu_host_page_size - 1) & qemu_host_page_mask); |
| 96 | if (start > host_start) { |
| 97 | /* handle host page containing start */ |
| 98 | prot1 = prot; |
| 99 | for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 100 | prot1 |= page_get_flags(addr); |
| 101 | } |
| 102 | if (host_end == host_start + qemu_host_page_size) { |
| 103 | for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 104 | prot1 |= page_get_flags(addr); |
| 105 | } |
| 106 | end = host_end; |
| 107 | } |
| 108 | ret = mprotect(g2h(host_start)((void *)((unsigned long)(target_ulong)(host_start) + guest_base )), qemu_host_page_size, prot1 & PAGE_BITS(0x0001 | 0x0002 | 0x0004)); |
| 109 | if (ret != 0) |
| 110 | goto error; |
| 111 | host_start += qemu_host_page_size; |
| 112 | } |
| 113 | if (end < host_end) { |
| 114 | prot1 = prot; |
| 115 | for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 116 | prot1 |= page_get_flags(addr); |
| 117 | } |
| 118 | ret = mprotect(g2h(host_end - qemu_host_page_size)((void *)((unsigned long)(target_ulong)(host_end - qemu_host_page_size ) + guest_base)), qemu_host_page_size, |
| 119 | prot1 & PAGE_BITS(0x0001 | 0x0002 | 0x0004)); |
| 120 | if (ret != 0) |
| 121 | goto error; |
| 122 | host_end -= qemu_host_page_size; |
| 123 | } |
| 124 | |
| 125 | /* handle the pages in the middle */ |
| 126 | if (host_start < host_end) { |
| 127 | ret = mprotect(g2h(host_start)((void *)((unsigned long)(target_ulong)(host_start) + guest_base )), host_end - host_start, prot); |
| 128 | if (ret != 0) |
| 129 | goto error; |
| 130 | } |
| 131 | page_set_flags(start, start + len, prot | PAGE_VALID0x0008); |
| 132 | mmap_unlock(); |
| 133 | return 0; |
| 134 | error: |
| 135 | mmap_unlock(); |
| 136 | return ret; |
| 137 | } |
| 138 | |
| 139 | /* map an incomplete host page */ |
| 140 | static int mmap_frag(abi_ulong real_start, |
| 141 | abi_ulong start, abi_ulong end, |
| 142 | int prot, int flags, int fd, abi_ulong offset) |
| 143 | { |
| 144 | abi_ulong real_end, addr; |
| 145 | void *host_start; |
| 146 | int prot1, prot_new; |
| 147 | |
| 148 | real_end = real_start + qemu_host_page_size; |
| 149 | host_start = g2h(real_start)((void *)((unsigned long)(target_ulong)(real_start) + guest_base )); |
| 150 | |
| 151 | /* get the protection of the target pages outside the mapping */ |
| 152 | prot1 = 0; |
| 153 | for(addr = real_start; addr < real_end; addr++) { |
| 154 | if (addr < start || addr >= end) |
| 155 | prot1 |= page_get_flags(addr); |
| 156 | } |
| 157 | |
| 158 | if (prot1 == 0) { |
| 159 | /* no page was there, so we allocate one */ |
| 160 | void *p = mmap(host_start, qemu_host_page_size, prot, |
| 161 | flags | MAP_ANONYMOUS0x20, -1, 0); |
| 162 | if (p == MAP_FAILED((void *) -1)) |
| 163 | return -1; |
| 164 | prot1 = prot; |
| 165 | } |
| 166 | prot1 &= PAGE_BITS(0x0001 | 0x0002 | 0x0004); |
| 167 | |
| 168 | prot_new = prot | prot1; |
| 169 | if (!(flags & MAP_ANONYMOUS0x20)) { |
| 170 | /* msync() won't work here, so we return an error if write is |
| 171 | possible while it is a shared mapping */ |
| 172 | if ((flags & MAP_TYPE0x0f) == MAP_SHARED0x01 && |
| 173 | (prot & PROT_WRITE0x2)) |
| 174 | return -1; |
| 175 | |
| 176 | /* adjust protection to be able to read */ |
| 177 | if (!(prot1 & PROT_WRITE0x2)) |
| 178 | mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE0x2); |
| 179 | |
| 180 | /* read the corresponding file data */ |
| 181 | if (pread(fd, g2h(start)((void *)((unsigned long)(target_ulong)(start) + guest_base)), end - start, offset) == -1) |
| 182 | return -1; |
| 183 | |
| 184 | /* put final protection */ |
| 185 | if (prot_new != (prot1 | PROT_WRITE0x2)) |
| 186 | mprotect(host_start, qemu_host_page_size, prot_new); |
| 187 | } else { |
| 188 | /* just update the protection */ |
| 189 | if (prot_new != prot1) { |
| 190 | mprotect(host_start, qemu_host_page_size, prot_new); |
| 191 | } |
| 192 | } |
| 193 | return 0; |
| 194 | } |
| 195 | |
| 196 | #if HOST_LONG_BITS64 == 64 && TARGET_ABI_BITS32 == 64 |
| 197 | # define TASK_UNMAPPED_BASE0x40000000 (1ul << 38) |
| 198 | #elif defined(__CYGWIN__) |
| 199 | /* Cygwin doesn't have a whole lot of address space. */ |
| 200 | # define TASK_UNMAPPED_BASE0x40000000 0x18000000 |
| 201 | #else |
| 202 | # define TASK_UNMAPPED_BASE0x40000000 0x40000000 |
| 203 | #endif |
| 204 | abi_ulong mmap_next_start = TASK_UNMAPPED_BASE0x40000000; |
| 205 | |
| 206 | unsigned long last_brk; |
| 207 | |
#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  Scans downwards from the top of the window
   for 'size' contiguous bytes with no guest page flags set, wrapping
   once to the top of the reserved area before giving up. */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > RESERVED_VA) {
        end_addr = RESERVED_VA;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            /* addr wrapped below zero; restart once from the top */
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = RESERVED_VA;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            /* page in use: the free window must end below it */
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
#endif
| 256 | |
| 257 | /* |
| 258 | * Find and reserve a free memory area of size 'size'. The search |
| 259 | * starts at 'start'. |
| 260 | * It must be called with mmap_lock() held. |
| 261 | * Return -1 if error. |
| 262 | */ |
| 263 | abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) |
| 264 | { |
| 265 | void *ptr, *prev; |
| 266 | abi_ulong addr; |
| 267 | int wrapped, repeat; |
| 268 | |
| 269 | /* If 'start' == 0, then a default start address is used. */ |
| 270 | if (start == 0) { |
| 271 | start = mmap_next_start; |
| 272 | } else { |
| 273 | start &= qemu_host_page_mask; |
| 274 | } |
| 275 | |
| 276 | size = HOST_PAGE_ALIGN(size)(((size) + qemu_host_page_size - 1) & qemu_host_page_mask ); |
| 277 | |
| 278 | #ifdef CONFIG_USE_GUEST_BASE1 |
| 279 | if (RESERVED_VAreserved_va) { |
| 280 | return mmap_find_vma_reserved(start, size); |
| 281 | } |
| 282 | #endif |
| 283 | |
| 284 | addr = start; |
| 285 | wrapped = repeat = 0; |
| 286 | prev = 0; |
| 287 | |
| 288 | for (;; prev = ptr) { |
| 289 | /* |
| 290 | * Reserve needed memory area to avoid a race. |
| 291 | * It should be discarded using: |
| 292 | * - mmap() with MAP_FIXED flag |
| 293 | * - mremap() with MREMAP_FIXED flag |
| 294 | * - shmat() with SHM_REMAP flag |
| 295 | */ |
| 296 | ptr = mmap(g2h(addr)((void *)((unsigned long)(target_ulong)(addr) + guest_base)), size, PROT_NONE0x0, |
| 297 | MAP_ANONYMOUS0x20|MAP_PRIVATE0x02|MAP_NORESERVE0x4000, -1, 0); |
| 298 | |
| 299 | /* ENOMEM, if host address space has no memory */ |
| 300 | if (ptr == MAP_FAILED((void *) -1)) { |
| 301 | return (abi_ulong)-1; |
| 302 | } |
| 303 | |
| 304 | /* Count the number of sequential returns of the same address. |
| 305 | This is used to modify the search algorithm below. */ |
| 306 | repeat = (ptr == prev ? repeat + 1 : 0); |
| 307 | |
| 308 | if (h2g_valid(ptr + size - 1)({ unsigned long __guest = (unsigned long)(ptr + size - 1) - guest_base ; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })) { |
| 309 | addr = h2g(ptr)({ ((({ unsigned long __guest = (unsigned long)(ptr) - guest_base ; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })) ? (void) (0) : __assert_fail ("({ unsigned long __guest = (unsigned long)(ptr) - guest_base; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })" , "/home/stefan/src/qemu/qemu.org/qemu/linux-user/mmap.c", 309 , __PRETTY_FUNCTION__)); ({ unsigned long __ret = (unsigned long )(ptr) - guest_base; (abi_ulong)__ret; }); }); |
| 310 | |
| 311 | if ((addr & ~TARGET_PAGE_MASK~((1 << 12) - 1)) == 0) { |
| 312 | /* Success. */ |
| 313 | if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE0x40000000) { |
| 314 | mmap_next_start = addr + size; |
| 315 | } |
| 316 | return addr; |
| 317 | } |
| 318 | |
| 319 | /* The address is not properly aligned for the target. */ |
| 320 | switch (repeat) { |
| 321 | case 0: |
| 322 | /* Assume the result that the kernel gave us is the |
| 323 | first with enough free space, so start again at the |
| 324 | next higher target page. */ |
| 325 | addr = TARGET_PAGE_ALIGN(addr)(((addr) + (1 << 12) - 1) & ~((1 << 12) - 1)); |
| 326 | break; |
| 327 | case 1: |
| 328 | /* Sometimes the kernel decides to perform the allocation |
| 329 | at the top end of memory instead. */ |
| 330 | addr &= TARGET_PAGE_MASK~((1 << 12) - 1); |
| 331 | break; |
| 332 | case 2: |
| 333 | /* Start over at low memory. */ |
| 334 | addr = 0; |
| 335 | break; |
| 336 | default: |
| 337 | /* Fail. This unaligned block must the last. */ |
| 338 | addr = -1; |
| 339 | break; |
| 340 | } |
| 341 | } else { |
| 342 | /* Since the result the kernel gave didn't fit, start |
| 343 | again at low memory. If any repetition, fail. */ |
| 344 | addr = (repeat ? -1 : 0); |
| 345 | } |
| 346 | |
| 347 | /* Unmap and try again. */ |
| 348 | munmap(ptr, size); |
| 349 | |
| 350 | /* ENOMEM if we checked the whole of the target address space. */ |
| 351 | if (addr == (abi_ulong)-1) { |
| 352 | return (abi_ulong)-1; |
| 353 | } else if (addr == 0) { |
| 354 | if (wrapped) { |
| 355 | return (abi_ulong)-1; |
| 356 | } |
| 357 | wrapped = 1; |
| 358 | /* Don't actually use 0 when wrapping, instead indicate |
| 359 | that we'd truly like an allocation in low memory. */ |
| 360 | addr = (mmap_min_addr > TARGET_PAGE_SIZE(1 << 12) |
| 361 | ? TARGET_PAGE_ALIGN(mmap_min_addr)(((mmap_min_addr) + (1 << 12) - 1) & ~((1 << 12 ) - 1)) |
| 362 | : TARGET_PAGE_SIZE(1 << 12)); |
| 363 | } else if (wrapped && addr >= start) { |
| 364 | return (abi_ulong)-1; |
| 365 | } |
| 366 | } |
| 367 | } |
| 368 | |
| 369 | /* NOTE: all the constants are the HOST ones */ |
| 370 | abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, |
| 371 | int flags, int fd, abi_ulong offset) |
| 372 | { |
| 373 | abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len; |
| 374 | |
| 375 | mmap_lock(); |
| 376 | #ifdef DEBUG_MMAP |
| 377 | { |
| 378 | printf("mmap: start=0x" TARGET_ABI_FMT_lx"%08x" |
| 379 | " len=0x" TARGET_ABI_FMT_lx"%08x" " prot=%c%c%c flags=", |
| 380 | start, len, |
| 381 | prot & PROT_READ0x1 ? 'r' : '-', |
| 382 | prot & PROT_WRITE0x2 ? 'w' : '-', |
| 383 | prot & PROT_EXEC0x4 ? 'x' : '-'); |
| 384 | if (flags & MAP_FIXED0x10) |
| 385 | printf("MAP_FIXED "); |
| 386 | if (flags & MAP_ANONYMOUS0x20) |
| 387 | printf("MAP_ANON "); |
| 388 | switch(flags & MAP_TYPE0x0f) { |
| 389 | case MAP_PRIVATE0x02: |
| 390 | printf("MAP_PRIVATE "); |
| 391 | break; |
| 392 | case MAP_SHARED0x01: |
| 393 | printf("MAP_SHARED "); |
| 394 | break; |
| 395 | default: |
| 396 | printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE0x0f); |
| 397 | break; |
| 398 | } |
| 399 | printf("fd=%d offset=" TARGET_ABI_FMT_lx"%08x" "\n", fd, offset); |
| 400 | } |
| 401 | #endif |
| 402 | |
| 403 | if (offset & ~TARGET_PAGE_MASK~((1 << 12) - 1)) { |
| 404 | errno(*__errno_location ()) = EINVAL22; |
| 405 | goto fail; |
| 406 | } |
| 407 | |
| 408 | len = TARGET_PAGE_ALIGN(len)(((len) + (1 << 12) - 1) & ~((1 << 12) - 1)); |
| 409 | if (len == 0) |
| 410 | goto the_end; |
| 411 | real_start = start & qemu_host_page_mask; |
| 412 | host_offset = offset & qemu_host_page_mask; |
| 413 | |
| 414 | /* If the user is asking for the kernel to find a location, do that |
| 415 | before we truncate the length for mapping files below. */ |
| 416 | if (!(flags & MAP_FIXED0x10)) { |
| 417 | host_len = len + offset - host_offset; |
| 418 | host_len = HOST_PAGE_ALIGN(host_len)(((host_len) + qemu_host_page_size - 1) & qemu_host_page_mask ); |
| 419 | start = mmap_find_vma(real_start, host_len); |
| 420 | if (start == (abi_ulong)-1) { |
| 421 | errno(*__errno_location ()) = ENOMEM12; |
| 422 | goto fail; |
| 423 | } |
| 424 | } |
| 425 | |
| 426 | /* When mapping files into a memory area larger than the file, accesses |
| 427 | to pages beyond the file size will cause a SIGBUS. |
| 428 | |
| 429 | For example, if mmaping a file of 100 bytes on a host with 4K pages |
| 430 | emulating a target with 8K pages, the target expects to be able to |
| 431 | access the first 8K. But the host will trap us on any access beyond |
| 432 | 4K. |
| 433 | |
| 434 | When emulating a target with a larger page-size than the hosts, we |
| 435 | may need to truncate file maps at EOF and add extra anonymous pages |
| 436 | up to the targets page boundary. */ |
| 437 | |
| 438 | if ((qemu_real_host_page_size < TARGET_PAGE_SIZE(1 << 12)) |
| 439 | && !(flags & MAP_ANONYMOUS0x20)) { |
| 440 | struct stat sb; |
| 441 | |
| 442 | if (fstat (fd, &sb) == -1) |
| 443 | goto fail; |
| 444 | |
| 445 | /* Are we trying to create a map beyond EOF?. */ |
| 446 | if (offset + len > sb.st_size) { |
| 447 | /* If so, truncate the file map at eof aligned with |
| 448 | the hosts real pagesize. Additional anonymous maps |
| 449 | will be created beyond EOF. */ |
| 450 | len = (sb.st_size - offset); |
| 451 | len += qemu_real_host_page_size - 1; |
| 452 | len &= ~(qemu_real_host_page_size - 1); |
| 453 | } |
| 454 | } |
| 455 | |
| 456 | if (!(flags & MAP_FIXED0x10)) { |
| 457 | unsigned long host_start; |
| 458 | void *p; |
| 459 | |
| 460 | host_len = len + offset - host_offset; |
| 461 | host_len = HOST_PAGE_ALIGN(host_len)(((host_len) + qemu_host_page_size - 1) & qemu_host_page_mask ); |
| 462 | |
| 463 | /* Note: we prefer to control the mapping address. It is |
| 464 | especially important if qemu_host_page_size > |
| 465 | qemu_real_host_page_size */ |
| 466 | p = mmap(g2h(start)((void *)((unsigned long)(target_ulong)(start) + guest_base)), host_len, prot, |
| 467 | flags | MAP_FIXED0x10 | MAP_ANONYMOUS0x20, -1, 0); |
| 468 | if (p == MAP_FAILED((void *) -1)) |
| 469 | goto fail; |
| 470 | /* update start so that it points to the file position at 'offset' */ |
| 471 | host_start = (unsigned long)p; |
| 472 | if (!(flags & MAP_ANONYMOUS0x20)) { |
| 473 | p = mmap(g2h(start)((void *)((unsigned long)(target_ulong)(start) + guest_base)), len, prot, |
| 474 | flags | MAP_FIXED0x10, fd, host_offset); |
| 475 | if (p == MAP_FAILED((void *) -1)) { |
| 476 | munmap(g2h(start)((void *)((unsigned long)(target_ulong)(start) + guest_base)), host_len); |
| 477 | goto fail; |
| 478 | } |
| 479 | host_start += offset - host_offset; |
| 480 | } |
| 481 | start = h2g(host_start)({ ((({ unsigned long __guest = (unsigned long)(host_start) - guest_base; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })) ? (void) (0) : __assert_fail ("({ unsigned long __guest = (unsigned long)(host_start) - guest_base; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })" , "/home/stefan/src/qemu/qemu.org/qemu/linux-user/mmap.c", 481 , __PRETTY_FUNCTION__)); ({ unsigned long __ret = (unsigned long )(host_start) - guest_base; (abi_ulong)__ret; }); }); |
| 482 | } else { |
| 483 | if (start & ~TARGET_PAGE_MASK~((1 << 12) - 1)) { |
| 484 | errno(*__errno_location ()) = EINVAL22; |
| 485 | goto fail; |
| 486 | } |
| 487 | end = start + len; |
| 488 | real_end = HOST_PAGE_ALIGN(end)(((end) + qemu_host_page_size - 1) & qemu_host_page_mask); |
| 489 | |
| 490 | /* |
| 491 | * Test if requested memory area fits target address space |
| 492 | * It can fail only on 64-bit host with 32-bit target. |
| 493 | * On any other target/host host mmap() handles this error correctly. |
| 494 | */ |
| 495 | if ((unsigned long)start + len - 1 > (abi_ulong) -1) { |
| 496 | errno(*__errno_location ()) = EINVAL22; |
| 497 | goto fail; |
| 498 | } |
| 499 | |
| 500 | /* worst case: we cannot map the file because the offset is not |
| 501 | aligned, so we read it */ |
| 502 | if (!(flags & MAP_ANONYMOUS0x20) && |
| 503 | (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) { |
| 504 | /* msync() won't work here, so we return an error if write is |
| 505 | possible while it is a shared mapping */ |
| 506 | if ((flags & MAP_TYPE0x0f) == MAP_SHARED0x01 && |
| 507 | (prot & PROT_WRITE0x2)) { |
| 508 | errno(*__errno_location ()) = EINVAL22; |
| 509 | goto fail; |
| 510 | } |
| 511 | retaddr = target_mmap(start, len, prot | PROT_WRITE0x2, |
| 512 | MAP_FIXED0x10 | MAP_PRIVATE0x02 | MAP_ANONYMOUS0x20, |
| 513 | -1, 0); |
| 514 | if (retaddr == -1) |
| 515 | goto fail; |
| 516 | if (pread(fd, g2h(start)((void *)((unsigned long)(target_ulong)(start) + guest_base)), len, offset) == -1) |
| 517 | goto fail; |
| 518 | if (!(prot & PROT_WRITE0x2)) { |
| 519 | ret = target_mprotect(start, len, prot); |
| 520 | if (ret != 0) { |
| 521 | start = ret; |
| 522 | goto the_end; |
| 523 | } |
| 524 | } |
| 525 | goto the_end; |
| 526 | } |
| 527 | |
| 528 | /* handle the start of the mapping */ |
| 529 | if (start > real_start) { |
| 530 | if (real_end == real_start + qemu_host_page_size) { |
| 531 | /* one single host page */ |
| 532 | ret = mmap_frag(real_start, start, end, |
| 533 | prot, flags, fd, offset); |
| 534 | if (ret == -1) |
| 535 | goto fail; |
| 536 | goto the_end1; |
| 537 | } |
| 538 | ret = mmap_frag(real_start, start, real_start + qemu_host_page_size, |
| 539 | prot, flags, fd, offset); |
| 540 | if (ret == -1) |
| 541 | goto fail; |
| 542 | real_start += qemu_host_page_size; |
| 543 | } |
| 544 | /* handle the end of the mapping */ |
| 545 | if (end < real_end) { |
| 546 | ret = mmap_frag(real_end - qemu_host_page_size, |
| 547 | real_end - qemu_host_page_size, real_end, |
| 548 | prot, flags, fd, |
| 549 | offset + real_end - qemu_host_page_size - start); |
| 550 | if (ret == -1) |
| 551 | goto fail; |
| 552 | real_end -= qemu_host_page_size; |
| 553 | } |
| 554 | |
| 555 | /* map the middle (easier) */ |
| 556 | if (real_start < real_end) { |
| 557 | void *p; |
| 558 | unsigned long offset1; |
| 559 | if (flags & MAP_ANONYMOUS0x20) |
| 560 | offset1 = 0; |
| 561 | else |
| 562 | offset1 = offset + real_start - start; |
| 563 | p = mmap(g2h(real_start)((void *)((unsigned long)(target_ulong)(real_start) + guest_base )), real_end - real_start, |
| 564 | prot, flags, fd, offset1); |
| 565 | if (p == MAP_FAILED((void *) -1)) |
| 566 | goto fail; |
| 567 | } |
| 568 | } |
| 569 | the_end1: |
| 570 | page_set_flags(start, start + len, prot | PAGE_VALID0x0008); |
| 571 | the_end: |
| 572 | #ifdef DEBUG_MMAP |
| 573 | printf("ret=0x" TARGET_ABI_FMT_lx"%08x" "\n", start); |
| 574 | page_dump(stdoutstdout); |
| 575 | printf("\n"); |
| 576 | #endif |
| 577 | tb_invalidate_phys_range(start, start + len, 0); |
| 578 | mmap_unlock(); |
| 579 | return start; |
| 580 | fail: |
| 581 | mmap_unlock(); |
| 582 | return -1; |
| 583 | } |
| 584 | |
| 585 | static void mmap_reserve(abi_ulong start, abi_ulong size) |
| 586 | { |
| 587 | abi_ulong real_start; |
| 588 | abi_ulong real_end; |
| 589 | abi_ulong addr; |
| 590 | abi_ulong end; |
| 591 | int prot; |
| 592 | |
| 593 | real_start = start & qemu_host_page_mask; |
| 594 | real_end = HOST_PAGE_ALIGN(start + size)(((start + size) + qemu_host_page_size - 1) & qemu_host_page_mask ); |
| 595 | end = start + size; |
| 596 | if (start > real_start) { |
| 597 | /* handle host page containing start */ |
| 598 | prot = 0; |
| 599 | for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 600 | prot |= page_get_flags(addr); |
| 601 | } |
| 602 | if (real_end == real_start + qemu_host_page_size) { |
| 603 | for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 604 | prot |= page_get_flags(addr); |
| 605 | } |
| 606 | end = real_end; |
| 607 | } |
| 608 | if (prot != 0) |
| 609 | real_start += qemu_host_page_size; |
| 610 | } |
| 611 | if (end < real_end) { |
| 612 | prot = 0; |
| 613 | for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 614 | prot |= page_get_flags(addr); |
| 615 | } |
| 616 | if (prot != 0) |
| 617 | real_end -= qemu_host_page_size; |
| 618 | } |
| 619 | if (real_start != real_end) { |
| 620 | mmap(g2h(real_start)((void *)((unsigned long)(target_ulong)(real_start) + guest_base )), real_end - real_start, PROT_NONE0x0, |
| 621 | MAP_FIXED0x10 | MAP_ANONYMOUS0x20 | MAP_PRIVATE0x02 | MAP_NORESERVE0x4000, |
| 622 | -1, 0); |
| 623 | } |
| 624 | } |
| 625 | |
| 626 | int target_munmap(abi_ulong start, abi_ulong len) |
| 627 | { |
| 628 | abi_ulong end, real_start, real_end, addr; |
| 629 | int prot, ret; |
| 630 | |
| 631 | #ifdef DEBUG_MMAP |
| 632 | printf("munmap: start=0x" TARGET_ABI_FMT_lx"%08x" " len=0x" |
| 633 | TARGET_ABI_FMT_lx"%08x" "\n", |
| 634 | start, len); |
| 635 | #endif |
| 636 | if (start & ~TARGET_PAGE_MASK~((1 << 12) - 1)) |
| 637 | return -EINVAL22; |
| 638 | len = TARGET_PAGE_ALIGN(len)(((len) + (1 << 12) - 1) & ~((1 << 12) - 1)); |
| 639 | if (len == 0) |
| 640 | return -EINVAL22; |
| 641 | mmap_lock(); |
| 642 | end = start + len; |
| 643 | real_start = start & qemu_host_page_mask; |
| 644 | real_end = HOST_PAGE_ALIGN(end)(((end) + qemu_host_page_size - 1) & qemu_host_page_mask); |
| 645 | |
| 646 | if (start > real_start) { |
| 647 | /* handle host page containing start */ |
| 648 | prot = 0; |
| 649 | for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 650 | prot |= page_get_flags(addr); |
| 651 | } |
| 652 | if (real_end == real_start + qemu_host_page_size) { |
| 653 | for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 654 | prot |= page_get_flags(addr); |
| 655 | } |
| 656 | end = real_end; |
| 657 | } |
| 658 | if (prot != 0) |
| 659 | real_start += qemu_host_page_size; |
| 660 | } |
| 661 | if (end < real_end) { |
| 662 | prot = 0; |
| 663 | for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE(1 << 12)) { |
| 664 | prot |= page_get_flags(addr); |
| 665 | } |
| 666 | if (prot != 0) |
| 667 | real_end -= qemu_host_page_size; |
| 668 | } |
| 669 | |
| 670 | ret = 0; |
| 671 | /* unmap what we can */ |
| 672 | if (real_start < real_end) { |
| 673 | if (RESERVED_VAreserved_va) { |
| 674 | mmap_reserve(real_start, real_end - real_start); |
| 675 | } else { |
| 676 | ret = munmap(g2h(real_start)((void *)((unsigned long)(target_ulong)(real_start) + guest_base )), real_end - real_start); |
| 677 | } |
| 678 | } |
| 679 | |
| 680 | if (ret == 0) { |
| 681 | page_set_flags(start, start + len, 0); |
| 682 | tb_invalidate_phys_range(start, start + len, 0); |
| 683 | } |
| 684 | mmap_unlock(); |
| 685 | return ret; |
| 686 | } |
| 687 | |
| 688 | abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, |
| 689 | abi_ulong new_size, unsigned long flags, |
| 690 | abi_ulong new_addr) |
| 691 | { |
| 692 | int prot; |
| 693 | void *host_addr; |
| 694 | |
| 695 | mmap_lock(); |
| 696 | |
| 697 | if (flags & MREMAP_FIXED2) { |
| 698 | host_addr = (void *) syscall(__NR_mremap25, g2h(old_addr)((void *)((unsigned long)(target_ulong)(old_addr) + guest_base )), |
| 699 | old_size, new_size, |
| 700 | flags, |
| 701 | g2h(new_addr)((void *)((unsigned long)(target_ulong)(new_addr) + guest_base ))); |
| 702 | |
| 703 | if (RESERVED_VAreserved_va && host_addr != MAP_FAILED((void *) -1)) { |
| 704 | /* If new and old addresses overlap then the above mremap will |
| 705 | already have failed with EINVAL. */ |
| 706 | mmap_reserve(old_addr, old_size); |
| 707 | } |
| 708 | } else if (flags & MREMAP_MAYMOVE1) { |
| 709 | abi_ulong mmap_start; |
| 710 | |
| 711 | mmap_start = mmap_find_vma(0, new_size); |
| 712 | |
| 713 | if (mmap_start == -1) { |
| 714 | errno(*__errno_location ()) = ENOMEM12; |
| 715 | host_addr = MAP_FAILED((void *) -1); |
| 716 | } else { |
| 717 | host_addr = (void *) syscall(__NR_mremap25, g2h(old_addr)((void *)((unsigned long)(target_ulong)(old_addr) + guest_base )), |
| 718 | old_size, new_size, |
| 719 | flags | MREMAP_FIXED2, |
| 720 | g2h(mmap_start)((void *)((unsigned long)(target_ulong)(mmap_start) + guest_base ))); |
| 721 | if ( RESERVED_VAreserved_va ) { |
| 722 | mmap_reserve(old_addr, old_size); |
| 723 | } |
| 724 | } |
| 725 | } else { |
| 726 | int prot = 0; |
| 727 | if (RESERVED_VAreserved_va && old_size < new_size) { |
| 728 | abi_ulong addr; |
| 729 | for (addr = old_addr + old_size; |
| 730 | addr < old_addr + new_size; |
| 731 | addr++) { |
| 732 | prot |= page_get_flags(addr); |
| 733 | } |
| 734 | } |
| 735 | if (prot == 0) { |
| 736 | host_addr = mremap(g2h(old_addr)((void *)((unsigned long)(target_ulong)(old_addr) + guest_base )), old_size, new_size, flags); |
| 737 | if (host_addr != MAP_FAILED((void *) -1) && RESERVED_VAreserved_va && old_size > new_size) { |
| 738 | mmap_reserve(old_addr + old_size, new_size - old_size); |
| 739 | } |
| 740 | } else { |
| 741 | errno(*__errno_location ()) = ENOMEM12; |
| 742 | host_addr = MAP_FAILED((void *) -1); |
| 743 | } |
| 744 | /* Check if address fits target address space */ |
| 745 | if ((unsigned long)host_addr + new_size > (abi_ulong)-1) { |
| 746 | /* Revert mremap() changes */ |
| 747 | host_addr = mremap(g2h(old_addr)((void *)((unsigned long)(target_ulong)(old_addr) + guest_base )), new_size, old_size, flags); |
Value stored to 'host_addr' is never read | |
| 748 | errno(*__errno_location ()) = ENOMEM12; |
| 749 | host_addr = MAP_FAILED((void *) -1); |
| 750 | } |
| 751 | } |
| 752 | |
| 753 | if (host_addr == MAP_FAILED((void *) -1)) { |
| 754 | new_addr = -1; |
| 755 | } else { |
| 756 | new_addr = h2g(host_addr)({ ((({ unsigned long __guest = (unsigned long)(host_addr) - guest_base ; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })) ? (void) (0) : __assert_fail ("({ unsigned long __guest = (unsigned long)(host_addr) - guest_base; (__guest < (1ul << 42)) && (!reserved_va || (__guest < reserved_va)); })" , "/home/stefan/src/qemu/qemu.org/qemu/linux-user/mmap.c", 756 , __PRETTY_FUNCTION__)); ({ unsigned long __ret = (unsigned long )(host_addr) - guest_base; (abi_ulong)__ret; }); }); |
| 757 | prot = page_get_flags(old_addr); |
| 758 | page_set_flags(old_addr, old_addr + old_size, 0); |
| 759 | page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID0x0008); |
| 760 | } |
| 761 | tb_invalidate_phys_range(new_addr, new_addr + new_size, 0); |
| 762 | mmap_unlock(); |
| 763 | return new_addr; |
| 764 | } |
| 765 | |
| 766 | int target_msync(abi_ulong start, abi_ulong len, int flags) |
| 767 | { |
| 768 | abi_ulong end; |
| 769 | |
| 770 | if (start & ~TARGET_PAGE_MASK~((1 << 12) - 1)) |
| 771 | return -EINVAL22; |
| 772 | len = TARGET_PAGE_ALIGN(len)(((len) + (1 << 12) - 1) & ~((1 << 12) - 1)); |
| 773 | end = start + len; |
| 774 | if (end < start) |
| 775 | return -EINVAL22; |
| 776 | if (end == start) |
| 777 | return 0; |
| 778 | |
| 779 | start &= qemu_host_page_mask; |
| 780 | return msync(g2h(start)((void *)((unsigned long)(target_ulong)(start) + guest_base)), end - start, flags); |
| 781 | } |